FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "ffmpeg_filter.h"
25 #include "graph/graphprint.h"
26 
27 #include "libavfilter/avfilter.h"
28 #include "libavfilter/buffersink.h"
29 #include "libavfilter/buffersrc.h"
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/bprint.h"
35 #include "libavutil/downmix_info.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/pixfmt.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/timestamp.h"
43 
44 // FIXME private header, used for mid_pred()
45 #include "libavcodec/mathops.h"
46 
47 
48 // data that is local to the filter thread and not visible outside of it
49 typedef struct FilterGraphThread {
51 
53 
54  // Temporary buffer for output frames, since on filtergraph reset
55  // we cannot send them to encoders immediately.
56  // The output index is stored in frame opaque.
58 
59  // index of the next input to request from the scheduler
60  unsigned next_in;
61  // set to 1 after at least one frame passed through this output
62  int got_frame;
63 
64  // EOF status of each input/output, as received by the thread
65  uint8_t *eof_in;
66  uint8_t *eof_out;
68 
69 typedef struct FilterCommand {
70  char *target;
71  char *command;
72  char *arg;
73 
74  double time;
77 
// Buffer free callback (opaque, data) for refcounted buffers that carry a
// FilterCommand: releases the strings owned by the command, then the
// command allocation itself.
78 static void filter_command_free(void *opaque, uint8_t *data)
79 {
// NOTE(review): this listing dropped original line 80 here — presumably the
// declaration of 'fc' as a FilterCommand * view of 'data'; confirm against
// the complete source file.
81 
82  av_freep(&fc->target);
83  av_freep(&fc->command);
84  av_freep(&fc->arg);
85 
86  av_free(data);
87 }
88 
90 {
91  AVFrame *frame = ifp->sub2video.frame;
92  int ret;
93 
95 
96  frame->width = ifp->width;
97  frame->height = ifp->height;
98  frame->format = ifp->format;
99  frame->colorspace = ifp->color_space;
100  frame->color_range = ifp->color_range;
101 
103  if (ret < 0)
104  return ret;
105 
106  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
107 
108  return 0;
109 }
110 
111 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
112  AVSubtitleRect *r)
113 {
114  uint32_t *pal, *dst2;
115  uint8_t *src, *src2;
116  int x, y;
117 
118  if (r->type != SUBTITLE_BITMAP) {
119  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
120  return;
121  }
122  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
123  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
124  r->x, r->y, r->w, r->h, w, h
125  );
126  return;
127  }
128 
129  dst += r->y * dst_linesize + r->x * 4;
130  src = r->data[0];
131  pal = (uint32_t *)r->data[1];
132  for (y = 0; y < r->h; y++) {
133  dst2 = (uint32_t *)dst;
134  src2 = src;
135  for (x = 0; x < r->w; x++)
136  *(dst2++) = pal[*(src2++)];
137  dst += dst_linesize;
138  src += r->linesize[0];
139  }
140 }
141 
143 {
144  AVFrame *frame = ifp->sub2video.frame;
145  int ret;
146 
147  av_assert1(frame->data[0]);
148  ifp->sub2video.last_pts = frame->pts = pts;
152  if (ret != AVERROR_EOF && ret < 0)
154  "Error while add the frame to buffer source(%s).\n",
155  av_err2str(ret));
156 }
157 
// Render the given subtitle (or a blank/heartbeat frame when sub == NULL)
// onto the sub2video canvas and push it into the buffer source, updating
// the stored end timestamp and the initialization flag.
158 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
159  const AVSubtitle *sub)
160 {
161  AVFrame *frame = ifp->sub2video.frame;
// NOTE(review): 'int8_t *dst' looks like a scrape-mangled 'uint8_t *dst'
// (frame->data[] and sub2video_copy_rect() use uint8_t) — verify upstream.
162  int8_t *dst;
163  int dst_linesize;
164  int num_rects;
165  int64_t pts, end_pts;
166 
167  if (sub) {
// convert subtitle display window from AV_TIME_BASE to the input timebase
168  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
169  AV_TIME_BASE_Q, ifp->time_base);
170  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
171  AV_TIME_BASE_Q, ifp->time_base);
172  num_rects = sub->num_rects;
173  } else {
174  /* If we are initializing the system, utilize current heartbeat
175  PTS as the start time, and show until the following subpicture
176  is received. Otherwise, utilize the previous subpicture's end time
177  as the fall-back value. */
178  pts = ifp->sub2video.initialize ?
179  heartbeat_pts : ifp->sub2video.end_pts;
180  end_pts = INT64_MAX;
181  num_rects = 0;
182  }
183  if (sub2video_get_blank_frame(ifp) < 0) {
// NOTE(review): original line 184 was dropped by the extraction — presumably
// the opening of an av_log(..., AV_LOG_ERROR, ...) call; confirm upstream.
185  "Impossible to get a blank canvas.\n");
186  return;
187  }
188  dst = frame->data [0];
189  dst_linesize = frame->linesize[0];
190  for (int i = 0; i < num_rects; i++)
191  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
192  sub2video_push_ref(ifp, pts);
193  ifp->sub2video.end_pts = end_pts;
194  ifp->sub2video.initialize = 0;
195 }
196 
197 /* Define a function for appending a list of allowed formats
198  * to an AVBPrint. If nonempty, the list will have a header. */
/* The generated helper choose_<name>() emits "name=" followed by either the
 * single configured value (ofp->var != none) or a '|'-separated list of the
 * entries in ofp->supported_list, then terminates the clause with ':' so it
 * can be concatenated into a filter argument string. Nothing is written when
 * neither a value nor a list is set. */
199 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
200 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
201 { \
202  if (ofp->var == none && !ofp->supported_list) \
203  return; \
204  av_bprintf(bprint, #name "="); \
205  if (ofp->var != none) { \
206  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
207  } else { \
208  const type *p; \
209  \
210  for (p = ofp->supported_list; *p != none; p++) { \
211  av_bprintf(bprint, printf_format "|", get_name(*p)); \
212  } \
213  if (bprint->len > 0) \
214  bprint->str[--bprint->len] = '\0'; \
215  } \
216  av_bprint_chars(bprint, ':', 1); \
217 }
218 
221 
224 
226  "%d", )
227 
228 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
230 
231 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
233 
// Append a "channel_layouts=..." clause to bprint: either the single
// configured layout, or a '|'-separated list of the allowed layouts,
// terminated with ':'. Writes nothing when neither is set.
234 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
235 {
236  if (av_channel_layout_check(&ofp->ch_layout)) {
237  av_bprintf(bprint, "channel_layouts=");
238  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
239  } else if (ofp->ch_layouts) {
240  const AVChannelLayout *p;
241 
242  av_bprintf(bprint, "channel_layouts=");
243  for (p = ofp->ch_layouts; p->nb_channels; p++) {
// NOTE(review): original line 244 was dropped by the extraction — presumably
// av_channel_layout_describe_bprint(p, bprint); confirm against upstream.
245  av_bprintf(bprint, "|");
246  }
// drop the trailing '|' left by the loop above
247  if (bprint->len > 0)
248  bprint->str[--bprint->len] = '\0';
249  } else
250  return;
251  av_bprint_chars(bprint, ':', 1);
252 }
253 
254 static int read_binary(void *logctx, const char *path,
255  uint8_t **data, int *len)
256 {
257  AVIOContext *io = NULL;
258  int64_t fsize;
259  int ret;
260 
261  *data = NULL;
262  *len = 0;
263 
264  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
265  if (ret < 0) {
266  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
267  path, av_err2str(ret));
268  return ret;
269  }
270 
271  fsize = avio_size(io);
272  if (fsize < 0 || fsize > INT_MAX) {
273  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
274  ret = AVERROR(EIO);
275  goto fail;
276  }
277 
278  *data = av_malloc(fsize);
279  if (!*data) {
280  ret = AVERROR(ENOMEM);
281  goto fail;
282  }
283 
284  ret = avio_read(io, *data, fsize);
285  if (ret != fsize) {
286  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
287  ret = ret < 0 ? ret : AVERROR(EIO);
288  goto fail;
289  }
290 
291  *len = fsize;
292 
293  ret = 0;
294 fail:
295  avio_close(io);
296  if (ret < 0) {
297  av_freep(data);
298  *len = 0;
299  }
300  return ret;
301 }
302 
// Apply one option (key, val) to filter context f. A key prefixed with '/'
// names a valid option whose value is to be loaded from the file at 'val'
// (raw bytes for AV_OPT_TYPE_BINARY, text otherwise).
303 static int filter_opt_apply(void *logctx, AVFilterContext *f,
304  const char *key, const char *val)
305 {
306  const AVOption *o = NULL;
307  int ret;
308 
// NOTE(review): original line 309 was dropped by the extraction — presumably
// the direct av_opt_set(...) attempt whose result is tested below; confirm
// against upstream.
310  if (ret >= 0)
311  return 0;
312 
// Direct set failed: '/'-prefixed keys mean "load value from file".
313  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
// NOTE(review): original line 314 dropped — presumably the av_opt_find()
// lookup that assigns 'o'; confirm upstream.
315  if (!o)
316  goto err_apply;
317 
318  // key is a valid option name prefixed with '/'
319  // interpret value as a path from which to load the actual option value
320  key++;
321 
322  if (o->type == AV_OPT_TYPE_BINARY) {
323  uint8_t *data;
324  int len;
325 
326  ret = read_binary(logctx, val, &data, &len);
327  if (ret < 0)
328  goto err_load;
329 
// NOTE(review): original line 330 dropped — presumably the
// av_opt_set_bin(...) call consuming 'data'/'len'; confirm upstream.
331  av_freep(&data);
332  } else {
333  char *data = file_read(val);
334  if (!data) {
335  ret = AVERROR(EIO);
336  goto err_load;
337  }
338 
// NOTE(review): original line 339 dropped — presumably the av_opt_set(...)
// call applying the file contents; confirm upstream.
340  av_freep(&data);
341  }
342  if (ret < 0)
343  goto err_apply;
344 
345  return 0;
346 
347 err_apply:
348  av_log(logctx, AV_LOG_ERROR,
349  "Error applying option '%s' to filter '%s': %s\n",
350  key, f->filter->name, av_err2str(ret));
351  return ret;
352 err_load:
353  av_log(logctx, AV_LOG_ERROR,
354  "Error loading value for option '%s' from file '%s'\n",
355  key, val);
356  return ret;
357 }
358 
359 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
360 {
361  for (size_t i = 0; i < seg->nb_chains; i++) {
362  AVFilterChain *ch = seg->chains[i];
363 
364  for (size_t j = 0; j < ch->nb_filters; j++) {
365  AVFilterParams *p = ch->filters[j];
366  const AVDictionaryEntry *e = NULL;
367 
368  av_assert0(p->filter);
369 
370  while ((e = av_dict_iterate(p->opts, e))) {
371  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
372  if (ret < 0)
373  return ret;
374  }
375 
376  av_dict_free(&p->opts);
377  }
378  }
379 
380  return 0;
381 }
382 
// Parse a filtergraph description into 'graph', attach the hw device (if
// any) to filters that accept one, apply per-filter options, and return
// the unlinked inputs/outputs of the graph through the out-parameters.
383 static int graph_parse(void *logctx,
384  AVFilterGraph *graph, const char *desc,
// NOTE(review): original line 385 was dropped by the extraction — presumably
// the AVFilterInOut **inputs, **outputs parameters dereferenced below;
// confirm against upstream.
386  AVBufferRef *hw_device)
387 {
// NOTE(review): original line 388 dropped — presumably the declaration of
// 'seg' (AVFilterGraphSegment *) used throughout; confirm upstream.
389  int ret;
390 
391  *inputs = NULL;
392  *outputs = NULL;
393 
394  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
395  if (ret < 0)
396  return ret;
397 
// NOTE(review): original line 398 dropped — presumably
// avfilter_graph_segment_create_filters(seg, 0); confirm upstream.
399  if (ret < 0)
400  goto fail;
401 
402  if (hw_device) {
// hand a hw device ref to every filter that declares AVFILTER_FLAG_HWDEVICE
403  for (int i = 0; i < graph->nb_filters; i++) {
404  AVFilterContext *f = graph->filters[i];
405 
406  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
407  continue;
408  f->hw_device_ctx = av_buffer_ref(hw_device);
409  if (!f->hw_device_ctx) {
410  ret = AVERROR(ENOMEM);
411  goto fail;
412  }
413  }
414  }
415 
416  ret = graph_opts_apply(logctx, seg);
417  if (ret < 0)
418  goto fail;
419 
// NOTE(review): original line 420 dropped — presumably the
// avfilter_graph_segment_apply(...) call; success falls through to 'fail'
// for common cleanup. Line 423 (segment free) was also dropped.
421 
422 fail:
424  return ret;
425 }
426 
427 // Filters can be configured only if the formats of all inputs are known.
429 {
430  for (int i = 0; i < fg->nb_inputs; i++) {
432  if (ifp->format < 0)
433  return 0;
434  }
435  return 1;
436 }
437 
438 static int filter_thread(void *arg);
439 
440 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
441 {
442  AVFilterContext *ctx = inout->filter_ctx;
443  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
444  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
445 
446  if (nb_pads > 1)
447  return av_strdup(ctx->filter->name);
448  return av_asprintf("%s:%s", ctx->filter->name,
449  avfilter_pad_get_name(pads, inout->pad_idx));
450 }
451 
452 static const char *ofilter_item_name(void *obj)
453 {
454  OutputFilterPriv *ofp = obj;
455  return ofp->log_name;
456 }
457 
// AVClass describing OutputFilter for av_log(): routes messages through
// the stored log_parent context and tags them with the per-output name.
458 static const AVClass ofilter_class = {
459  .class_name = "OutputFilter",
460  .version = LIBAVUTIL_VERSION_INT,
461  .item_name = ofilter_item_name,
462  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
463  .category = AV_CLASS_CATEGORY_FILTER,
464 };
465 
467 {
468  OutputFilterPriv *ofp;
469  OutputFilter *ofilter;
470 
471  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
472  if (!ofp)
473  return NULL;
474 
475  ofilter = &ofp->ofilter;
476  ofilter->class = &ofilter_class;
477  ofp->log_parent = fg;
478  ofilter->graph = fg;
479  ofilter->type = type;
480  ofp->format = -1;
483  ofp->index = fg->nb_outputs - 1;
484 
485  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
487 
488  return ofilter;
489 }
490 
// Bind a filtergraph input to a demuxed input stream: registers the filter
// with the stream, connects it in the scheduler, and sets up the sub2video
// canvas when the source stream is a subtitle stream.
491 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
492  const ViewSpecifier *vs)
493 {
494  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
495  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
// NOTE(review): original line 496 was dropped by the extraction — presumably
// the declaration of 'src' (scheduler node) filled by ist_filter_add();
// confirm against upstream.
497  int ret;
498 
499  av_assert0(!ifp->bound);
500  ifp->bound = 1;
501 
502  if (ifp->type != ist->par->codec_type &&
// NOTE(review): original lines 503 and 505 dropped — presumably the
// subtitle->video (sub2video) exemption in this condition and the av_log
// arguments naming both media types; confirm upstream.
504  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
506  return AVERROR(EINVAL);
507  }
508 
509  ifp->type_src = ist->st->codecpar->codec_type;
510 
511  ifp->opts.fallback = av_frame_alloc();
512  if (!ifp->opts.fallback)
513  return AVERROR(ENOMEM);
514 
515  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
516  vs, &ifp->opts, &src);
517  if (ret < 0)
518  return ret;
519 
520  ret = sch_connect(fgp->sch,
521  src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
522  if (ret < 0)
523  return ret;
524 
525  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
526  ifp->sub2video.frame = av_frame_alloc();
527  if (!ifp->sub2video.frame)
528  return AVERROR(ENOMEM);
529 
530  ifp->width = ifp->opts.sub2video_width;
531  ifp->height = ifp->opts.sub2video_height;
532 
533  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
534  palettes for all rectangles are identical or compatible */
535  ifp->format = AV_PIX_FMT_RGB32;
536 
537  ifp->time_base = AV_TIME_BASE_Q;
538 
539  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
540  ifp->width, ifp->height);
541  }
542 
543  return 0;
544 }
545 
547  const ViewSpecifier *vs)
548 {
551  int ret;
552 
553  av_assert0(!ifp->bound);
554  ifp->bound = 1;
555 
556  if (ifp->type != dec->type) {
557  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
559  return AVERROR(EINVAL);
560  }
561 
562  ifp->type_src = ifp->type;
563 
564  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
565  if (ret < 0)
566  return ret;
567 
568  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
569  if (ret < 0)
570  return ret;
571 
572  return 0;
573 }
574 
575 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
576  const AVChannelLayout *layout_requested)
577 {
578  int i, err;
579 
580  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
581  /* Pass the layout through for all orders but UNSPEC */
582  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
583  if (err < 0)
584  return err;
585  return 0;
586  }
587 
588  /* Requested layout is of order UNSPEC */
589  if (!layouts_allowed) {
590  /* Use the default native layout for the requested amount of channels when the
591  encoder doesn't have a list of supported layouts */
592  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
593  return 0;
594  }
595  /* Encoder has a list of supported layouts. Pick the first layout in it with the
596  same amount of channels as the requested layout */
597  for (i = 0; layouts_allowed[i].nb_channels; i++) {
598  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
599  break;
600  }
601  if (layouts_allowed[i].nb_channels) {
602  /* Use it if one is found */
603  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
604  if (err < 0)
605  return err;
606  return 0;
607  }
608  /* If no layout for the amount of channels requested was found, use the default
609  native layout for it. */
610  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
611 
612  return 0;
613 }
614 
// Bind a filtergraph output to an encoder: copies the output constraints
// (formats, rates, layouts, frame-rate/vsync parameters, trim window, sws/swr
// options) from 'opts' into the output filter and connects it to the encoder
// in the scheduler.
615 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
616  const OutputFilterOptions *opts)
617 {
618  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
619  FilterGraph *fg = ofilter->graph;
620  FilterGraphPriv *fgp = fgp_from_fg(fg);
621  int ret;
622 
623  av_assert0(!ofilter->bound);
624  av_assert0(!opts->enc ||
625  ofilter->type == opts->enc->type);
626 
627  ofilter->bound = 1;
628  av_freep(&ofilter->linklabel);
629 
630  ofp->flags = opts->flags;
631  ofp->ts_offset = opts->ts_offset;
632  ofp->enc_timebase = opts->output_tb;
633 
634  ofp->trim_start_us = opts->trim_start_us;
635  ofp->trim_duration_us = opts->trim_duration_us;
636 
637  ofp->name = av_strdup(opts->name);
638  if (!ofp->name)
// NOTE(review): allocation failure conventionally maps to AVERROR(ENOMEM);
// EINVAL here looks questionable — verify against upstream intent.
639  return AVERROR(EINVAL);
640 
641  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
642  if (ret < 0)
643  return ret;
644 
645  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
646  if (ret < 0)
647  return ret;
648 
649  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
650  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
651 
652  if (fgp->is_simple) {
653  // for simple filtergraph there is just one output,
654  // so use only graph-level information for logging
655  ofp->log_parent = NULL;
656  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
657  } else
658  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
659 
660  switch (ofilter->type) {
661  case AVMEDIA_TYPE_VIDEO:
662  ofp->width = opts->width;
663  ofp->height = opts->height;
// a fixed format wins over a list of candidate formats, here and below
664  if (opts->format != AV_PIX_FMT_NONE) {
665  ofp->format = opts->format;
666  } else
667  ofp->formats = opts->formats;
668 
669  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
670  ofp->color_space = opts->color_space;
671  else
672  ofp->color_spaces = opts->color_spaces;
673 
674  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
675  ofp->color_range = opts->color_range;
676  else
677  ofp->color_ranges = opts->color_ranges;
678 
// NOTE(review): original line 679 was dropped by the extraction — confirm
// what assignment sat here against the complete source.
680 
681  ofp->fps.last_frame = av_frame_alloc();
682  if (!ofp->fps.last_frame)
683  return AVERROR(ENOMEM);
684 
685  ofp->fps.vsync_method = opts->vsync_method;
686  ofp->fps.framerate = opts->frame_rate;
687  ofp->fps.framerate_max = opts->max_frame_rate;
688  ofp->fps.framerate_supported = opts->frame_rates;
689 
690  // reduce frame rate for mpeg4 to be within the spec limits
691  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
692  ofp->fps.framerate_clip = 65535;
693 
694  ofp->fps.dup_warning = 1000;
695 
696  break;
697  case AVMEDIA_TYPE_AUDIO:
698  if (opts->format != AV_SAMPLE_FMT_NONE) {
699  ofp->format = opts->format;
700  } else {
701  ofp->formats = opts->formats;
702  }
703  if (opts->sample_rate) {
704  ofp->sample_rate = opts->sample_rate;
705  } else
706  ofp->sample_rates = opts->sample_rates;
707  if (opts->ch_layout.nb_channels) {
708  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
709  if (ret < 0)
710  return ret;
711  } else {
712  ofp->ch_layouts = opts->ch_layouts;
713  }
714  break;
715  }
716 
717  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
718  SCH_ENC(sched_idx_enc));
719  if (ret < 0)
720  return ret;
721 
722  return 0;
723 }
724 
726  const OutputFilterOptions *opts)
727 {
728  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
729 
730  av_assert0(!ofilter->bound);
731  av_assert0(ofilter->type == ifp->type);
732 
733  ofilter->bound = 1;
734  av_freep(&ofilter->linklabel);
735 
736  ofp->name = av_strdup(opts->name);
737  if (!ofp->name)
738  return AVERROR(EINVAL);
739 
740  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
741 
742  return 0;
743 }
744 
// Bind a filtergraph input to the out_idx-th output of another filtergraph,
// checking that the media types match and wiring the two graphs together in
// the scheduler.
745 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
746 {
// NOTE(review): original lines 747 and 749 were dropped by the extraction —
// presumably the declarations of 'fgp' (from ifp's graph) and 'opts'
// (OutputFilterOptions, memset below); confirm against upstream.
748  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
750  char name[32];
751  int ret;
752 
753  av_assert0(!ifp->bound);
754  ifp->bound = 1;
755 
756  if (ifp->type != ofilter_src->type) {
757  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
758  av_get_media_type_string(ofilter_src->type),
// NOTE(review): original line 759 (second media-type argument) dropped here.
760  return AVERROR(EINVAL);
761  }
762 
763  ifp->type_src = ifp->type;
764 
765  memset(&opts, 0, sizeof(opts));
766 
767  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
768  opts.name = name;
769 
770  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
771  if (ret < 0)
772  return ret;
773 
774  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
775  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
776  if (ret < 0)
777  return ret;
778 
779  return 0;
780 }
781 
783 {
784  InputFilterPriv *ifp;
785  InputFilter *ifilter;
786 
787  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
788  if (!ifp)
789  return NULL;
790 
791  ifilter = &ifp->ifilter;
792  ifilter->graph = fg;
793 
794  ifp->frame = av_frame_alloc();
795  if (!ifp->frame)
796  return NULL;
797 
798  ifp->index = fg->nb_inputs - 1;
799  ifp->format = -1;
802 
804  if (!ifp->frame_queue)
805  return NULL;
806 
807  return ifilter;
808 }
809 
// Free a FilterGraph and everything it owns: all input filters (including
// queued frames), all output filters, the graph description and the scratch
// frames. Sets *pfg to NULL; a NULL graph is a no-op.
810 void fg_free(FilterGraph **pfg)
811 {
812  FilterGraph *fg = *pfg;
813  FilterGraphPriv *fgp;
814 
815  if (!fg)
816  return;
817  fgp = fgp_from_fg(fg);
818 
819  for (int j = 0; j < fg->nb_inputs; j++) {
820  InputFilter *ifilter = fg->inputs[j];
821  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
822 
823  if (ifp->frame_queue) {
824  AVFrame *frame;
825  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
// NOTE(review): original lines 826-827 and 829 were dropped by the
// extraction — presumably the av_frame_free() of each drained frame and
// the fifo free; confirm against upstream.
828  }
830 
831  av_frame_free(&ifp->frame);
832  av_frame_free(&ifp->opts.fallback);
833 
// NOTE(review): original lines 834 and 837 dropped here (further per-input
// cleanup); confirm upstream.
835  av_freep(&ifp->linklabel);
836  av_freep(&ifp->opts.name);
838  av_freep(&ifilter->name);
839  av_freep(&fg->inputs[j]);
840  }
841  av_freep(&fg->inputs);
842  for (int j = 0; j < fg->nb_outputs; j++) {
843  OutputFilter *ofilter = fg->outputs[j];
844  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
845 
// NOTE(review): original line 846 dropped here (per-output cleanup, likely
// the fps.last_frame free); lines 854-855 below were dropped as well.
847  av_dict_free(&ofp->sws_opts);
848  av_dict_free(&ofp->swr_opts);
849 
850  av_freep(&ofilter->linklabel);
851  av_freep(&ofilter->name);
852  av_freep(&ofilter->apad);
853  av_freep(&ofp->name);
856  av_freep(&fg->outputs[j]);
857  }
858  av_freep(&fg->outputs);
859  av_freep(&fgp->graph_desc);
860 
861  av_frame_free(&fgp->frame);
862  av_frame_free(&fgp->frame_enc);
863 
864  av_freep(pfg);
865 }
866 
867 static const char *fg_item_name(void *obj)
868 {
869  const FilterGraphPriv *fgp = obj;
870 
871  return fgp->log_name;
872 }
873 
// AVClass describing FilterGraph for av_log(): messages are tagged with
// the graph's log name (e.g. "fc#0").
874 static const AVClass fg_class = {
875  .class_name = "FilterGraph",
876  .version = LIBAVUTIL_VERSION_INT,
877  .item_name = fg_item_name,
878  .category = AV_CLASS_CATEGORY_FILTER,
879 };
880 
// Create a FilterGraph from a textual description. Ownership of graph_desc
// passes to the new graph (freed on failure). A throwaway AVFilterGraph is
// parsed only to enumerate the unlinked inputs/outputs, from which the
// InputFilter/OutputFilter arrays are built; the graph is then registered
// with the scheduler. When pfg is NULL the graph is appended to the global
// filtergraphs list instead of being returned.
881 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
882 {
883  FilterGraphPriv *fgp;
884  FilterGraph *fg;
885 
// NOTE(review): original line 886 was dropped by the extraction — presumably
// the declarations of 'inputs'/'outputs' (AVFilterInOut *) used below;
// confirm against upstream.
887  AVFilterGraph *graph;
888  int ret = 0;
889 
890  fgp = av_mallocz(sizeof(*fgp));
891  if (!fgp) {
892  av_freep(&graph_desc);
893  return AVERROR(ENOMEM);
894  }
895  fg = &fgp->fg;
896 
897  if (pfg) {
898  *pfg = fg;
899  fg->index = -1;
900  } else {
// NOTE(review): original line 901 dropped — presumably the append of fg to
// the global filtergraphs array; confirm upstream.
902  if (ret < 0) {
903  av_freep(&graph_desc);
904  av_freep(&fgp);
905  return ret;
906  }
907 
908  fg->index = nb_filtergraphs - 1;
909  }
910 
911  fg->class = &fg_class;
912  fgp->graph_desc = graph_desc;
// NOTE(review): original line 913 dropped here; confirm upstream.
914  fgp->nb_threads = -1;
915  fgp->sch = sch;
916 
917  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
918 
919  fgp->frame = av_frame_alloc();
920  fgp->frame_enc = av_frame_alloc();
921  if (!fgp->frame || !fgp->frame_enc)
922  return AVERROR(ENOMEM);
923 
924  /* this graph is only used for determining the kinds of inputs
925  * and outputs we have, and is discarded on exit from this function */
926  graph = avfilter_graph_alloc();
927  if (!graph)
// NOTE(review): stray double semicolon below — harmless, but worth cleaning
// up upstream.
928  return AVERROR(ENOMEM);;
929  graph->nb_threads = 1;
930 
931  ret = graph_parse(fg, graph, fgp->graph_desc, &inputs, &outputs,
// NOTE(review): original line 932 (final graph_parse argument, presumably
// the hw device) was dropped by the extraction; confirm upstream.
933  if (ret < 0)
934  goto fail;
935 
// a graph with a pure source filter (no inputs) can generate data on its own
936  for (unsigned i = 0; i < graph->nb_filters; i++) {
937  const AVFilter *f = graph->filters[i]->filter;
938  if ((!avfilter_filter_pad_count(f, 0) &&
939  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
940  !strcmp(f->name, "apad")) {
941  fgp->have_sources = 1;
942  break;
943  }
944  }
945 
946  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
947  InputFilter *const ifilter = ifilter_alloc(fg);
948  InputFilterPriv *ifp;
949 
950  if (!ifilter) {
951  ret = AVERROR(ENOMEM);
952  goto fail;
953  }
954 
955  ifp = ifp_from_ifilter(ifilter);
// take over the link label from the parsed inout list
956  ifp->linklabel = cur->name;
957  cur->name = NULL;
958 
959  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
960  cur->pad_idx);
961 
962  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
963  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
964  "currently.\n");
965  ret = AVERROR(ENOSYS);
966  goto fail;
967  }
968 
969  ifilter->name = describe_filter_link(fg, cur, 1);
970  if (!ifilter->name) {
971  ret = AVERROR(ENOMEM);
972  goto fail;
973  }
974  }
975 
976  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
977  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
978  cur->pad_idx);
979  OutputFilter *const ofilter = ofilter_alloc(fg, type);
980 
981  if (!ofilter) {
982  ret = AVERROR(ENOMEM);
983  goto fail;
984  }
985 
986  ofilter->linklabel = cur->name;
987  cur->name = NULL;
988 
989  ofilter->name = describe_filter_link(fg, cur, 0);
990  if (!ofilter->name) {
991  ret = AVERROR(ENOMEM);
992  goto fail;
993  }
994  }
995 
996  if (!fg->nb_outputs) {
997  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
998  ret = AVERROR(ENOSYS);
999  goto fail;
1000  }
1001 
1002  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1003  filter_thread, fgp);
1004  if (ret < 0)
1005  goto fail;
1006  fgp->sch_idx = ret;
1007 
// success also falls through here: the throwaway graph is always freed
1008 fail:
// NOTE(review): original lines 1009-1010 dropped — presumably the
// avfilter_inout_free() calls for 'inputs'/'outputs'; confirm upstream.
1011  avfilter_graph_free(&graph);
1012 
1013  if (ret < 0)
1014  return ret;
1015 
1016  return 0;
1017 }
1018 
1020  InputStream *ist,
1021  char *graph_desc,
1022  Scheduler *sch, unsigned sched_idx_enc,
1023  const OutputFilterOptions *opts)
1024 {
1025  const enum AVMediaType type = ist->par->codec_type;
1026  FilterGraph *fg;
1027  FilterGraphPriv *fgp;
1028  int ret;
1029 
1030  ret = fg_create(pfg, graph_desc, sch);
1031  if (ret < 0)
1032  return ret;
1033  fg = *pfg;
1034  fgp = fgp_from_fg(fg);
1035 
1036  fgp->is_simple = 1;
1037 
1038  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1039  av_get_media_type_string(type)[0], opts->name);
1040 
1041  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1042  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1043  "to have exactly 1 input and 1 output. "
1044  "However, it had %d input(s) and %d output(s). Please adjust, "
1045  "or use a complex filtergraph (-filter_complex) instead.\n",
1046  graph_desc, fg->nb_inputs, fg->nb_outputs);
1047  return AVERROR(EINVAL);
1048  }
1049  if (fg->outputs[0]->type != type) {
1050  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1051  "it to %s output stream\n",
1054  return AVERROR(EINVAL);
1055  }
1056 
1057  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1058  if (ret < 0)
1059  return ret;
1060 
1061  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1062  if (ret < 0)
1063  return ret;
1064 
1065  if (opts->nb_threads >= 0)
1066  fgp->nb_threads = opts->nb_threads;
1067 
1068  return 0;
1069 }
1070 
1072 {
1073  FilterGraphPriv *fgp = fgp_from_fg(fg);
1074  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1075  InputStream *ist = NULL;
1076  enum AVMediaType type = ifp->type;
1078  const char *spec;
1079  char *p;
1080  int i, ret;
1081 
1082  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1083  // bind to a standalone decoder
1084  int dec_idx;
1085 
1086  dec_idx = strtol(ifp->linklabel + 4, &p, 0);
1087  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1088  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1089  dec_idx, fgp->graph_desc);
1090  return AVERROR(EINVAL);
1091  }
1092 
1093  if (type == AVMEDIA_TYPE_VIDEO) {
1094  spec = *p == ':' ? p + 1 : p;
1095  ret = view_specifier_parse(&spec, &vs);
1096  if (ret < 0)
1097  return ret;
1098  }
1099 
1100  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1101  if (ret < 0)
1102  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1103  ifilter->name);
1104  return ret;
1105  } else if (ifp->linklabel) {
1107  AVFormatContext *s;
1108  AVStream *st = NULL;
1109  int file_idx;
1110 
1111  // try finding an unbound filtergraph output with this label
1112  for (int i = 0; i < nb_filtergraphs; i++) {
1113  FilterGraph *fg_src = filtergraphs[i];
1114 
1115  if (fg == fg_src)
1116  continue;
1117 
1118  for (int j = 0; j < fg_src->nb_outputs; j++) {
1119  OutputFilter *ofilter = fg_src->outputs[j];
1120 
1121  if (!ofilter->bound && ofilter->linklabel &&
1122  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1123  av_log(fg, AV_LOG_VERBOSE,
1124  "Binding input with label '%s' to filtergraph output %d:%d\n",
1125  ifp->linklabel, i, j);
1126 
1127  ret = ifilter_bind_fg(ifp, fg_src, j);
1128  if (ret < 0)
1129  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1130  ifp->linklabel);
1131  return ret;
1132  }
1133  }
1134  }
1135 
1136  // bind to an explicitly specified demuxer stream
1137  file_idx = strtol(ifp->linklabel, &p, 0);
1138  if (file_idx < 0 || file_idx >= nb_input_files) {
1139  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1140  file_idx, fgp->graph_desc);
1141  return AVERROR(EINVAL);
1142  }
1143  s = input_files[file_idx]->ctx;
1144 
1145  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1146  if (ret < 0) {
1147  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1148  return ret;
1149  }
1150 
1151  if (type == AVMEDIA_TYPE_VIDEO) {
1152  spec = ss.remainder ? ss.remainder : "";
1153  ret = view_specifier_parse(&spec, &vs);
1154  if (ret < 0) {
1156  return ret;
1157  }
1158  }
1159 
1160  for (i = 0; i < s->nb_streams; i++) {
1161  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1162  if (stream_type != type &&
1163  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1164  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1165  continue;
1166  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1167  st = s->streams[i];
1168  break;
1169  }
1170  }
1172  if (!st) {
1173  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1174  "matches no streams.\n", p, fgp->graph_desc);
1175  return AVERROR(EINVAL);
1176  }
1177  ist = input_files[file_idx]->streams[st->index];
1178 
1179  av_log(fg, AV_LOG_VERBOSE,
1180  "Binding input with label '%s' to input stream %d:%d\n",
1181  ifp->linklabel, ist->file->index, ist->index);
1182  } else {
1183  ist = ist_find_unused(type);
1184  if (!ist) {
1185  av_log(fg, AV_LOG_FATAL,
1186  "Cannot find an unused %s input stream to feed the "
1187  "unlabeled input pad %s.\n",
1188  av_get_media_type_string(type), ifilter->name);
1189  return AVERROR(EINVAL);
1190  }
1191 
1192  av_log(fg, AV_LOG_VERBOSE,
1193  "Binding unlabeled input %d to input stream %d:%d\n",
1194  ifp->index, ist->file->index, ist->index);
1195  }
1196  av_assert0(ist);
1197 
1198  ret = ifilter_bind_ist(ifilter, ist, &vs);
1199  if (ret < 0) {
1200  av_log(fg, AV_LOG_ERROR,
1201  "Error binding an input stream to complex filtergraph input %s.\n",
1202  ifilter->name);
1203  return ret;
1204  }
1205 
1206  return 0;
1207 }
1208 
// Bind every not-yet-bound input of the given filtergraph to an input
// stream or another filtergraph output (via fg_complex_bind_input()).
1209 static int bind_inputs(FilterGraph *fg)
1210 {
1211  // bind filtergraph inputs to input streams or other filtergraphs
1212  for (int i = 0; i < fg->nb_inputs; i++) {
// NOTE(review): original line 1213 was dropped by the extraction —
// presumably the declaration of 'ifp' from fg->inputs[i]; confirm against
// upstream.
1214  int ret;
1215 
1216  if (ifp->bound)
1217  continue;
1218 
1219  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1220  if (ret < 0)
1221  return ret;
1222  }
1223 
1224  return 0;
1225 }
1226 
// NOTE(review): the function signature (original line 1227) is missing from
// this extract. Body: binds the inputs of every filtergraph, then verifies
// that every filtergraph output has been bound to a consumer.
1228 {
1229  int ret;
1230 
1231  for (int i = 0; i < nb_filtergraphs; i++) {
 // NOTE(review): original line 1232 (presumably the call assigning 'ret'
 // for filtergraphs[i]) is missing from this extract.
1233  if (ret < 0)
1234  return ret;
1235  }
1236 
1237  // check that all outputs were bound
1238  for (int i = 0; i < nb_filtergraphs; i++) {
1239  FilterGraph *fg = filtergraphs[i];
1240 
1241  for (int j = 0; j < fg->nb_outputs; j++) {
1242  OutputFilter *output = fg->outputs[j];
1243  if (!output->bound) {
1244  av_log(fg, AV_LOG_FATAL,
1245  "Filter '%s' has output %d (%s) unconnected\n",
1246  output->name, j,
1247  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1248  return AVERROR(EINVAL);
1249  }
1250  }
1251  }
1252 
1253  return 0;
1254 }
1255 
// Insert a (a)trim filter after *last_filter/*pad_idx to enforce the given
// start time (us) and duration (us). Chooses "trim" or "atrim" based on the
// media type of the output pad. No-op when neither limit is set. On success
// *last_filter/*pad_idx are updated to point at the trim filter's output.
1256 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1257  AVFilterContext **last_filter, int *pad_idx,
1258  const char *filter_name)
1259 {
1260  AVFilterGraph *graph = (*last_filter)->graph;
 // NOTE(review): original line 1261 (declaration of 'ctx') is missing
 // from this extract.
1262  const AVFilter *trim;
1263  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1264  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1265  int ret = 0;
1266 
1267  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1268  return 0;
1269 
1270  trim = avfilter_get_by_name(name);
1271  if (!trim) {
1272  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1273  "recording time.\n", name);
1274  return AVERROR_FILTER_NOT_FOUND;
1275  }
1276 
1277  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1278  if (!ctx)
1279  return AVERROR(ENOMEM);
1280 
 // Set the trim bounds via AVOption; the trailing argument of each call
 // (original lines 1283 and 1287) is missing from this extract.
1281  if (duration != INT64_MAX) {
1282  ret = av_opt_set_int(ctx, "durationi", duration,
1284  }
1285  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1286  ret = av_opt_set_int(ctx, "starti", start_time,
1288  }
1289  if (ret < 0) {
1290  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1291  return ret;
1292  }
1293 
 // NOTE(review): original line 1294 (presumably filter initialization
 // assigning 'ret') is missing from this extract.
1295  if (ret < 0)
1296  return ret;
1297 
1298  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1299  if (ret < 0)
1300  return ret;
1301 
1302  *last_filter = ctx;
1303  *pad_idx = 0;
1304  return 0;
1305 }
1306 
// Create the named filter with the given args in the same graph as
// *last_filter, link it after *last_filter/*pad_idx, and advance
// *last_filter/*pad_idx to the new filter. Returns 0 or a negative error.
1307 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1308  const char *filter_name, const char *args)
1309 {
1310  AVFilterGraph *graph = (*last_filter)->graph;
 // NOTE(review): original line 1312 (declaration of 'ctx') is missing
 // from this extract.
1311  const AVFilter *filter = avfilter_get_by_name(filter_name);
1313  int ret;
1314 
1315  if (!filter)
1316  return AVERROR_BUG;
1317 
 // NOTE(review): original line 1318 (the start of the filter-creation
 // call assigning 'ret') is missing from this extract.
1319  filter,
1320  filter_name, args, NULL, graph);
1321  if (ret < 0)
1322  return ret;
1323 
1324  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1325  if (ret < 0)
1326  return ret;
1327 
1328  *last_filter = ctx;
1329  *pad_idx = 0;
1330  return 0;
1331 }
1332 
// NOTE(review): the first line of the signature (original line 1333,
// presumably "static int configure_output_video_filter(...graph...)") is
// missing from this extract.
// Terminate a video output chain: create a buffersink, optionally insert an
// auto-scaler, a format filter constrained to the chosen pixel formats /
// color spaces / ranges, and an output trim, then link the chain to the sink.
1334  OutputFilter *ofilter, AVFilterInOut *out)
1335 {
1336  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1337  AVFilterContext *last_filter = out->filter_ctx;
1338  AVBPrint bprint;
1339  int pad_idx = out->pad_idx;
1340  int ret;
1341  char name[255];
1342 
1343  snprintf(name, sizeof(name), "out_%s", ofp->name);
 // NOTE(review): original line 1344 (the start of the buffersink
 // creation call) is missing from this extract.
1345  avfilter_get_by_name("buffersink"),
1346  name, NULL, NULL, graph);
1347 
1348  if (ret < 0)
1349  return ret;
1350 
 // Auto-insert a scaler when a fixed output size was requested.
1351  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1352  char args[255];
 // NOTE(review): original line 1353 (declaration of 'filter') is
 // missing from this extract.
1354  const AVDictionaryEntry *e = NULL;
1355 
1356  snprintf(args, sizeof(args), "%d:%d",
1357  ofp->width, ofp->height);
1358 
 // Append user-supplied sws options to the scaler arguments.
1359  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1360  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1361  }
1362 
1363  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
 // NOTE(review): original line 1364 (the start of the scaler creation
 // call) is missing from this extract.
1365  name, args, NULL, graph)) < 0)
1366  return ret;
1367  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1368  return ret;
1369 
1370  last_filter = filter;
1371  pad_idx = 0;
1372  }
1373 
 // NOTE(review): original lines 1374 and 1376 (an assert and the bprint
 // initialization) are missing from this extract.
1375  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1377  choose_pix_fmts(ofp, &bprint);
1378  choose_color_spaces(ofp, &bprint);
1379  choose_color_ranges(ofp, &bprint);
1380  if (!av_bprint_is_complete(&bprint))
1381  return AVERROR(ENOMEM);
1382 
1383  if (bprint.len) {
 // NOTE(review): original lines 1384-1387 (filter declaration and the
 // start of the "format" filter creation call) are partially missing.
1385 
1387  avfilter_get_by_name("format"),
1388  "format", bprint.str, NULL, graph);
1389  av_bprint_finalize(&bprint, NULL);
1390  if (ret < 0)
1391  return ret;
1392  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1393  return ret;
1394 
1395  last_filter = filter;
1396  pad_idx = 0;
1397  }
1398 
1399  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1400  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1401  &last_filter, &pad_idx, name);
1402  if (ret < 0)
1403  return ret;
1404 
1405 
1406  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1407  return ret;
1408 
1409  return 0;
1410 }
1411 
// NOTE(review): the first line of the signature (original line 1412) is
// missing from this extract.
// Terminate an audio output chain: create an abuffersink, optionally insert
// an aformat filter constrained to the chosen sample formats / rates /
// channel layouts, an apad filter when requested, and an output trim, then
// link the chain to the sink. All error paths finalize the AVBPrint.
1413  OutputFilter *ofilter, AVFilterInOut *out)
1414 {
1415  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1416  AVFilterContext *last_filter = out->filter_ctx;
1417  int pad_idx = out->pad_idx;
1418  AVBPrint args;
1419  char name[255];
1420  int ret;
1421 
1422  snprintf(name, sizeof(name), "out_%s", ofp->name);
 // NOTE(review): original line 1423 (the start of the abuffersink
 // creation call) is missing from this extract.
1424  avfilter_get_by_name("abuffersink"),
1425  name, NULL, NULL, graph);
1426  if (ret < 0)
1427  return ret;
1428 
 // Helper: create the named filter with the given argument, link it after
 // last_filter and advance the chain; jumps to fail on error.
1429 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1430  AVFilterContext *filt_ctx; \
1431  \
1432  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1433  "similarly to -af " filter_name "=%s.\n", arg); \
1434  \
1435  ret = avfilter_graph_create_filter(&filt_ctx, \
1436  avfilter_get_by_name(filter_name), \
1437  filter_name, arg, NULL, graph); \
1438  if (ret < 0) \
1439  goto fail; \
1440  \
1441  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1442  if (ret < 0) \
1443  goto fail; \
1444  \
1445  last_filter = filt_ctx; \
1446  pad_idx = 0; \
1447 } while (0)
 // NOTE(review): original line 1448 (presumably the bprint init of
 // 'args') is missing from this extract.
1449 
1450  choose_sample_fmts(ofp, &args);
1451  choose_sample_rates(ofp, &args);
1452  choose_channel_layouts(ofp, &args);
1453  if (!av_bprint_is_complete(&args)) {
1454  ret = AVERROR(ENOMEM);
1455  goto fail;
1456  }
1457  if (args.len) {
 // NOTE(review): original lines 1458 and 1461 (the 'format' filter
 // declaration and the start of its creation call) are missing.
1459 
1460  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1462  avfilter_get_by_name("aformat"),
1463  name, args.str, NULL, graph);
1464  if (ret < 0)
1465  goto fail;
1466 
1467  ret = avfilter_link(last_filter, pad_idx, format, 0);
1468  if (ret < 0)
1469  goto fail;
1470 
1471  last_filter = format;
1472  pad_idx = 0;
1473  }
1474 
1475  if (ofilter->apad) {
1476  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1477  fgp->have_sources = 1;
1478  }
1479 
1480  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1481  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1482  &last_filter, &pad_idx, name);
1483  if (ret < 0)
1484  goto fail;
1485 
1486  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1487  goto fail;
1488 fail:
1489  av_bprint_finalize(&args, NULL);
1490 
1491  return ret;
1492 }
1493 
// NOTE(review): the first line of the signature (original line 1494) is
// missing from this extract.
// Dispatch output-pad configuration to the video or audio variant based on
// the output filter's media type; other types are a programming error.
1495  OutputFilter *ofilter, AVFilterInOut *out)
1496 {
1497  switch (ofilter->type) {
1498  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1499  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1500  default: av_assert0(0); return 0;
1501  }
1502 }
1503 
// NOTE(review): the signature (original line 1504, presumably
// "static void sub2video_prepare(InputFilterPriv *ifp)") is missing from
// this extract. Resets the sub2video timestamp state and flags the
// structure for (re-)initialization on the next heartbeat.
1505 {
1506  ifp->sub2video.last_pts = INT64_MIN;
1507  ifp->sub2video.end_pts = INT64_MIN;
1508 
1509  /* sub2video structure has been (re-)initialized.
1510  Mark it as such so that the system will be
1511  initialized with the first received heartbeat. */
1512  ifp->sub2video.initialize = 1;
1513 }
1514 
// NOTE(review): the first line of the signature (original line 1515) is
// missing from this extract.
// Build the video input chain: create a "buffer" source configured from the
// stored stream parameters, then optionally insert crop, auto-rotation
// (transpose / hflip / vflip / rotate, driven by the display matrix), and an
// input trim, finally linking the chain to the parsed graph's input pad.
1516  InputFilter *ifilter, AVFilterInOut *in)
1517 {
1518  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1519 
1520  AVFilterContext *last_filter;
1521  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1522  const AVPixFmtDescriptor *desc;
1523  char name[255];
1524  int ret, pad_idx = 0;
 // NOTE(review): original line 1525 (allocation of 'par', presumably
 // av_buffersrc_parameters_alloc()) is missing from this extract.
1526  if (!par)
1527  return AVERROR(ENOMEM);
1528 
1529  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1530  sub2video_prepare(ifp);
1531 
1532  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1533  ifp->opts.name);
1534 
1535  ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1536  if (!ifp->filter) {
1537  ret = AVERROR(ENOMEM);
1538  goto fail;
1539  }
1540 
 // Forward the stream's video parameters to the buffer source.
1541  par->format = ifp->format;
1542  par->time_base = ifp->time_base;
1543  par->frame_rate = ifp->opts.framerate;
1544  par->width = ifp->width;
1545  par->height = ifp->height;
1546  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1547  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1548  par->color_space = ifp->color_space;
1549  par->color_range = ifp->color_range;
1550  par->hw_frames_ctx = ifp->hw_frames_ctx;
1551  par->side_data = ifp->side_data;
1552  par->nb_side_data = ifp->nb_side_data;
1553 
1554  ret = av_buffersrc_parameters_set(ifp->filter, par);
1555  if (ret < 0)
1556  goto fail;
1557  av_freep(&par);
1558 
1559  ret = avfilter_init_dict(ifp->filter, NULL);
1560  if (ret < 0)
1561  goto fail;
1562 
1563  last_filter = ifp->filter;
1564 
 // NOTE(review): original line 1565 (assignment of 'desc', presumably
 // from the pixel format) is missing from this extract.
1566  av_assert0(desc);
1567 
1568  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1569  char crop_buf[64];
1570  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1571  ifp->opts.crop_left, ifp->opts.crop_right,
1572  ifp->opts.crop_top, ifp->opts.crop_bottom,
1573  ifp->opts.crop_left, ifp->opts.crop_top);
1574  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1575  if (ret < 0)
1576  return ret;
1577  }
1578 
1579  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1580  ifp->displaymatrix_applied = 0;
1581  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1582  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1583  int32_t *displaymatrix = ifp->displaymatrix;
1584  double theta;
1585 
1586  theta = get_rotation(displaymatrix);
1587 
 // Pick the cheapest filter chain for the detected rotation angle
 // (within 1 degree of 90/180/270/0), falling back to "rotate".
1588  if (fabs(theta - 90) < 1.0) {
1589  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1590  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1591  } else if (fabs(theta - 180) < 1.0) {
1592  if (displaymatrix[0] < 0) {
1593  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1594  if (ret < 0)
1595  return ret;
1596  }
1597  if (displaymatrix[4] < 0) {
1598  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1599  }
1600  } else if (fabs(theta - 270) < 1.0) {
1601  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1602  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1603  } else if (fabs(theta) > 1.0) {
1604  char rotate_buf[64];
1605  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1606  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1607  } else if (fabs(theta) < 1.0) {
1608  if (displaymatrix && displaymatrix[4] < 0) {
1609  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1610  }
1611  }
1612  if (ret < 0)
1613  return ret;
1614 
1615  ifp->displaymatrix_applied = 1;
1616  }
1617 
1618  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1619  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1620  &last_filter, &pad_idx, name);
1621  if (ret < 0)
1622  return ret;
1623 
1624  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1625  return ret;
1626  return 0;
1627 fail:
1628  av_freep(&par);
1629 
1630  return ret;
1631 }
1632 
// NOTE(review): the first line of the signature (original line 1633) is
// missing from this extract.
// Build the audio input chain: create an "abuffer" source described by a
// time_base/sample_rate/sample_fmt/channel-layout argument string, attach
// side data via buffersrc parameters, insert an input trim, and link to the
// parsed graph's input pad.
1634  InputFilter *ifilter, AVFilterInOut *in)
1635 {
1636  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1637  AVFilterContext *last_filter;
1638  AVBufferSrcParameters *par;
1639  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1640  AVBPrint args;
1641  char name[255];
1642  int ret, pad_idx = 0;
1643 
 // NOTE(review): original line 1644 (the bprint init of 'args') is
 // missing from this extract.
1645  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1646  ifp->time_base.num, ifp->time_base.den,
1647  ifp->sample_rate,
 // NOTE(review): original lines 1648, 1650 and 1652 (the sample-format
 // argument, part of the layout condition, and the layout describe call)
 // are missing from this extract.
1649  if (av_channel_layout_check(&ifp->ch_layout) &&
1651  av_bprintf(&args, ":channel_layout=");
1653  } else
1654  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1655  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1656 
1657  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1658  name, args.str, NULL,
1659  graph)) < 0)
1660  return ret;
 // NOTE(review): original line 1661 (allocation of 'par') is missing
 // from this extract.
1662  if (!par)
1663  return AVERROR(ENOMEM);
1664  par->side_data = ifp->side_data;
1665  par->nb_side_data = ifp->nb_side_data;
1666  ret = av_buffersrc_parameters_set(ifp->filter, par);
1667  av_free(par);
1668  if (ret < 0)
1669  return ret;
1670  last_filter = ifp->filter;
1671 
1672  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1673  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1674  &last_filter, &pad_idx, name);
1675  if (ret < 0)
1676  return ret;
1677 
1678  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1679  return ret;
1680 
1681  return 0;
1682 }
1683 
// NOTE(review): the first line of the signature (original line 1684-1685) is
// missing from this extract.
// Dispatch input-pad configuration to the video or audio variant based on
// the input filter's media type; other types are a programming error.
1686  InputFilter *ifilter, AVFilterInOut *in)
1687 {
1688  switch (ifp_from_ifilter(ifilter)->type) {
1689  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1690  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1691  default: av_assert0(0); return 0;
1692  }
1693 }
1693 
// NOTE(review): the signature (original line 1694, presumably
// "static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)")
// is missing from this extract. Clears the per-pad filter-context pointers
// and frees the thread-local AVFilterGraph.
1695 {
1696  for (int i = 0; i < fg->nb_outputs; i++)
 // NOTE(review): original line 1697 (the output-side pointer reset) is
 // missing from this extract.
1698  for (int i = 0; i < fg->nb_inputs; i++)
1699  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1700  avfilter_graph_free(&fgt->graph);
1701 }
1702 
// NOTE(review): the signature (original line 1703, presumably
// "static int filter_is_buffersrc(const AVFilterContext *f)") is missing
// from this extract. True when the filter is a (a)buffer source: no inputs
// and named "buffer" or "abuffer".
1704 {
1705  return f->nb_inputs == 0 &&
1706  (!strcmp(f->filter->name, "buffer") ||
1707  !strcmp(f->filter->name, "abuffer"));
1708 }
1709 
// Return 1 when every filter in the graph is metadata-only, a sink
// (no outputs), or a buffer source — i.e. the graph cannot alter frame data.
1710 static int graph_is_meta(AVFilterGraph *graph)
1711 {
1712  for (unsigned i = 0; i < graph->nb_filters; i++) {
1713  const AVFilterContext *f = graph->filters[i];
1714 
1715  /* in addition to filters flagged as meta, also
1716  * disregard sinks and buffersources (but not other sources,
1717  * since they introduce data we are not aware of)
1718  */
1719  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1720  f->nb_outputs == 0 ||
 // NOTE(review): original line 1721 (the buffersource check closing
 // this condition) is missing from this extract.
1722  return 0;
1723  }
1724  return 1;
1725 }
1726 
1727 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1728 
// NOTE(review): the signature (original line 1729, presumably
// "static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)")
// is missing from this extract.
// (Re)build the runtime AVFilterGraph: apply thread/sws/swr options for
// simple graphs, parse the description, configure all input and output
// pads, record the negotiated output parameters, replay any queued input
// frames, and propagate EOF for inputs that already finished.
1730 {
1731  FilterGraphPriv *fgp = fgp_from_fg(fg);
1732  AVBufferRef *hw_device;
1733  AVFilterInOut *inputs, *outputs, *cur;
1734  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
1735  int have_input_eof = 0;
1736  const char *graph_desc = fgp->graph_desc;
1737 
1738  cleanup_filtergraph(fg, fgt);
1739  fgt->graph = avfilter_graph_alloc();
1740  if (!fgt->graph)
1741  return AVERROR(ENOMEM);
1742 
1743  if (simple) {
1744  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1745 
 // Thread-count override: the global option wins over the per-graph one.
1746  if (filter_nbthreads) {
1747  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1748  if (ret < 0)
1749  goto fail;
1750  } else if (fgp->nb_threads >= 0) {
1751  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
1752  if (ret < 0)
1753  return ret;
1754  }
1755 
1756  if (av_dict_count(ofp->sws_opts)) {
 // NOTE(review): original line 1757 (the start of the
 // av_dict_get_string call for sws_opts) is missing from this extract.
1758  &fgt->graph->scale_sws_opts,
1759  '=', ':');
1760  if (ret < 0)
1761  goto fail;
1762  }
1763 
1764  if (av_dict_count(ofp->swr_opts)) {
1765  char *args;
1766  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1767  if (ret < 0)
1768  goto fail;
1769  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1770  av_free(args);
1771  }
1772  } else {
 // NOTE(review): original line 1773 (the complex-graph branch body) is
 // missing from this extract.
1774  }
1775 
1776  hw_device = hw_device_for_filter();
1777 
1778  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1779  if (ret < 0)
1780  goto fail;
1781 
1782  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1783  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
 // NOTE(review): original lines 1784-1785 (freeing the inout lists on
 // error) are missing from this extract, as is line 1788.
1786  goto fail;
1787  }
1789 
1790  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1791  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1792  if (ret < 0) {
1793  goto fail;
1795  }
1796  }
 // NOTE(review): original line 1797 (freeing the outputs list) is
 // missing from this extract; the verbatim "1794 goto fail;" above was
 // preserved as-is.
1798 
1799  if (fgp->disable_conversions)
 // NOTE(review): original line 1800 (disabling auto conversions on the
 // graph) is missing from this extract.
1801  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1802  goto fail;
1803 
1804  fgp->is_meta = graph_is_meta(fgt->graph);
1805 
1806  /* limit the lists of allowed formats to the ones selected, to
1807  * make sure they stay the same if the filtergraph is reconfigured later */
1808  for (int i = 0; i < fg->nb_outputs; i++) {
1809  const AVFrameSideData *const *sd;
1810  int nb_sd;
1811  OutputFilter *ofilter = fg->outputs[i];
1812  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1813  AVFilterContext *sink = ofp->filter;
1814 
1815  ofp->format = av_buffersink_get_format(sink);
1816 
1817  ofp->width = av_buffersink_get_w(sink);
1818  ofp->height = av_buffersink_get_h(sink);
 // NOTE(review): original lines 1819-1820 (additional negotiated
 // parameters read from the sink) are missing from this extract.
1821 
1822  // If the timing parameters are not locked yet, get the tentative values
1823  // here but don't lock them. They will only be used if no output frames
1824  // are ever produced.
1825  if (!ofp->tb_out_locked) {
 // NOTE(review): original line 1826 (declaration of 'fr', presumably
 // the sink frame rate) is missing from this extract.
1827  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1828  fr.num > 0 && fr.den > 0)
1829  ofp->fps.framerate = fr;
1830  ofp->tb_out = av_buffersink_get_time_base(sink);
1831  }
 // NOTE(review): original lines 1832-1835 (sample aspect ratio /
 // sample rate reads) are missing from this extract.
1833 
1836  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1837  if (ret < 0)
1838  goto fail;
 // NOTE(review): original line 1839 is missing from this extract.
1840  sd = av_buffersink_get_side_data(sink, &nb_sd);
1841  if (nb_sd)
1842  for (int j = 0; j < nb_sd; j++) {
 // NOTE(review): original line 1843 (the side-data clone call
 // assigning 'ret') is missing from this extract.
1844  sd[j], 0);
1845  if (ret < 0) {
 // NOTE(review): original line 1846 is missing from this extract.
1847  goto fail;
1848  }
1849  }
1850  }
1851 
 // Replay frames that were queued while the graph was down.
1852  for (int i = 0; i < fg->nb_inputs; i++) {
 // NOTE(review): original line 1853 (declaration of 'ifp') is missing
 // from this extract.
1854  AVFrame *tmp;
1855  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1856  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1857  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1858  } else {
1859  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
1860  if (ifp->displaymatrix_applied)
 // NOTE(review): original lines 1861 and 1863 (display-matrix
 // side-data removal and the buffersrc add call) are missing.
1862  }
1864  }
1865  av_frame_free(&tmp);
1866  if (ret < 0)
1867  goto fail;
1868  }
1869  }
1870 
1871  /* send the EOFs for the finished inputs */
1872  for (int i = 0; i < fg->nb_inputs; i++) {
 // NOTE(review): original lines 1873 and 1875 (ifp declaration and the
 // EOF submission call) are missing from this extract.
1874  if (fgt->eof_in[i]) {
1876  if (ret < 0)
1877  goto fail;
1878  have_input_eof = 1;
1879  }
1880  }
1881 
1882  if (have_input_eof) {
1883  // make sure the EOF propagates to the end of the graph
 // NOTE(review): original line 1884 (the graph request call assigning
 // 'ret') is missing from this extract.
1885  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1886  goto fail;
1887  }
1888 
1889  return 0;
1890 fail:
1891  cleanup_filtergraph(fg, fgt);
1892  return ret;
1893 }
1894 
// NOTE(review): the signature (original line 1895, presumably
// "static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)")
// is missing from this extract.
// Capture the stream parameters of an incoming frame (format, dimensions,
// timing, channel layout, global and downmix side data, display matrix)
// into the InputFilterPriv, for use when (re)configuring the buffer source.
1896 {
1897  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1898  AVFrameSideData *sd;
1899  int ret;
1900 
1901  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
1902  if (ret < 0)
1903  return ret;
1904 
 // Audio uses 1/sample_rate; CFR video uses the inverse framerate;
 // everything else inherits the frame's own time base.
1905  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
1906  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
1907  frame->time_base;
1908 
1909  ifp->format = frame->format;
1910 
1911  ifp->width = frame->width;
1912  ifp->height = frame->height;
1913  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
1914  ifp->color_space = frame->colorspace;
1915  ifp->color_range = frame->color_range;
1916 
1917  ifp->sample_rate = frame->sample_rate;
1918  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
1919  if (ret < 0)
1920  return ret;
1921 
 // NOTE(review): original line 1922 (freeing the previously stored side
 // data) is missing from this extract.
1923  for (int i = 0; i < frame->nb_side_data; i++) {
1924  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
1925 
 // Only side data marked global (stream-level) is retained here.
1926  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL))
1927  continue;
1928 
 // NOTE(review): original line 1929 (the start of the side-data clone
 // call assigning 'ret') is missing from this extract.
1930  &ifp->nb_side_data,
1931  frame->side_data[i], 0);
1932  if (ret < 0)
1933  return ret;
1934  }
1935 
 // NOTE(review): original line 1936 (fetching the display-matrix side
 // data into 'sd') is missing from this extract.
1937  if (sd)
1938  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1939  ifp->displaymatrix_present = !!sd;
1940 
1941  /* Copy downmix related side data to InputFilterPriv so it may be propagated
1942  * to the filter chain even though it's not "global", as filters like aresample
1943  * require this information during init and not when remixing a frame */
 // NOTE(review): original lines 1944 and 1946 (fetching the downmix side
 // data and the start of its clone call) are missing from this extract.
1945  if (sd) {
1947  &ifp->nb_side_data, sd, 0);
1948  if (ret < 0)
1949  return ret;
1950  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
1951  }
1952  ifp->downmixinfo_present = !!sd;
1953 
1954  return 0;
1955 }
1956 
1958 {
1959  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1960  return fgp->is_simple;
1961 }
1962 
1963 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
1964  double time, const char *target,
1965  const char *command, const char *arg, int all_filters)
1966 {
1967  int ret;
1968 
1969  if (!graph)
1970  return;
1971 
1972  if (time < 0) {
1973  char response[4096];
1974  ret = avfilter_graph_send_command(graph, target, command, arg,
1975  response, sizeof(response),
1976  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1977  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1978  fg->index, ret, response);
1979  } else if (!all_filters) {
1980  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1981  } else {
1982  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
1983  if (ret < 0)
1984  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1985  }
1986 }
1987 
1988 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
1989 {
1990  int nb_requests, nb_requests_max = -1;
1991  int best_input = -1;
1992 
1993  for (int i = 0; i < fg->nb_inputs; i++) {
1994  InputFilter *ifilter = fg->inputs[i];
1995  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1996 
1997  if (fgt->eof_in[i])
1998  continue;
1999 
2000  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2001  if (nb_requests > nb_requests_max) {
2002  nb_requests_max = nb_requests;
2003  best_input = i;
2004  }
2005  }
2006 
2007  av_assert0(best_input >= 0);
2008 
2009  return best_input;
2010 }
2011 
// NOTE(review): the signature (original line 2012, presumably
// "static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)")
// is missing from this extract.
// Decide and lock the output time base (and, for video, the framerate) for
// this output, honoring -enc_time_base, CFR constraints, the supported /
// clipped framerate lists, and falling back to 25fps when nothing is known.
2013 {
2014  OutputFilter *ofilter = &ofp->ofilter;
2015  FPSConvContext *fps = &ofp->fps;
2016  AVRational tb = (AVRational){ 0, 0 };
2017  AVRational fr;
2018  const FrameData *fd;
2019 
2020  fd = frame_data_c(frame);
2021 
2022  // apply -enc_time_base
2023  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2024  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2025  av_log(ofp, AV_LOG_ERROR,
2026  "Demuxing timebase not available - cannot use it for encoding\n");
2027  return AVERROR(EINVAL);
2028  }
2029 
2030  switch (ofp->enc_timebase.num) {
2031  case 0: break;
2032  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2033  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2034  default: tb = ofp->enc_timebase; break;
2035  }
2036 
2037  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2038  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2039  goto finish;
2040  }
2041 
2042  fr = fps->framerate;
2043  if (!fr.num) {
 // NOTE(review): original line 2044 (declaration of 'fr_sink',
 // presumably the sink frame rate) is missing from this extract.
2045  if (fr_sink.num > 0 && fr_sink.den > 0)
2046  fr = fr_sink;
2047  }
2048 
2049  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2050  if (!fr.num && !fps->framerate_max.num) {
2051  fr = (AVRational){25, 1};
2052  av_log(ofp, AV_LOG_WARNING,
2053  "No information "
2054  "about the input framerate is available. Falling "
2055  "back to a default value of 25fps. Use the -r option "
2056  "if you want a different framerate.\n");
2057  }
2058 
2059  if (fps->framerate_max.num &&
2060  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2061  !fr.den))
2062  fr = fps->framerate_max;
2063  }
2064 
2065  if (fr.num > 0) {
 // Snap to the encoder-supported framerate list, then clip.
2066  if (fps->framerate_supported) {
2067  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2068  fr = fps->framerate_supported[idx];
2069  }
2070  if (fps->framerate_clip) {
2071  av_reduce(&fr.num, &fr.den,
2072  fr.num, fr.den, fps->framerate_clip);
2073  }
2074  }
2075 
2076  if (!(tb.num > 0 && tb.den > 0))
2077  tb = av_inv_q(fr);
2078  if (!(tb.num > 0 && tb.den > 0))
2079  tb = frame->time_base;
2080 
2081  fps->framerate = fr;
2082 finish:
2083  ofp->tb_out = tb;
2084  ofp->tb_out_locked = 1;
2085 
2086  return 0;
2087 }
2088 
// Rescale frame->pts from the filter time base to tb_dst (subtracting the
// start time) and return the same timestamp as a double with extra
// sub-integer precision, for use by the fps conversion code.
2089 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2090  AVRational tb_dst, int64_t start_time)
2091 {
2092  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2093 
2094  AVRational tb = tb_dst;
2095  AVRational filter_tb = frame->time_base;
2096  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2097 
2098  if (frame->pts == AV_NOPTS_VALUE)
2099  goto early_exit;
2100 
 // Work in a finer time base (den << extra_bits) to keep fractional pts.
2101  tb.den <<= extra_bits;
2102  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
 // NOTE(review): original line 2103 (the rescaled start_time subtrahend)
 // is missing from this extract.
2104  float_pts /= 1 << extra_bits;
2105  // when float_pts is not exactly an integer,
2106  // avoid exact midpoints to reduce the chance of rounding differences, this
2107  // can be removed in case the fps code is changed to work with integers
2108  if (float_pts != llrint(float_pts))
2109  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2110 
2111  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
 // NOTE(review): original line 2112 (the rescaled start_time subtrahend)
 // is missing from this extract.
2113  frame->time_base = tb_dst;
2114 
2115 early_exit:
2116 
2117  if (debug_ts) {
2118  av_log(logctx, AV_LOG_INFO,
2119  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2120  frame ? av_ts2str(frame->pts) : "NULL",
2121  av_ts2timestr(frame->pts, &tb_dst),
2122  float_pts, tb_dst.num, tb_dst.den);
2123  }
2124 
2125  return float_pts;
2126 }
2127 
2128 /* Convert frame timestamps to the encoder timebase and decide how many times
2129  * should this (and possibly previous) frame be repeated in order to conform to
2130  * desired target framerate (if any).
2131  */
// NOTE(review): the first line of the signature (original line 2132,
// presumably "static void video_sync_process(OutputFilterPriv *ofp,
// AVFrame *frame,") is missing from this extract.
2133  int64_t *nb_frames, int64_t *nb_frames_prev)
2134 {
2135  OutputFilter *ofilter = &ofp->ofilter;
2136  FPSConvContext *fps = &ofp->fps;
2137  double delta0, delta, sync_ipts, duration;
2138 
 // NULL frame = flush: repeat the median of the recent duplication
 // history so the stream ends at a plausible cadence.
2139  if (!frame) {
2140  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2141  fps->frames_prev_hist[1],
2142  fps->frames_prev_hist[2]);
2143 
2144  if (!*nb_frames && fps->last_dropped) {
2145  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2146  fps->last_dropped++;
2147  }
2148 
2149  goto finish;
2150  }
2151 
2152  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2153 
2154  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2155  ofp->tb_out, ofp->ts_offset);
2156  /* delta0 is the "drift" between the input frame and
2157  * where it would fall in the output. */
2158  delta0 = sync_ipts - ofp->next_pts;
2159  delta = delta0 + duration;
2160 
2161  // tracks the number of times the PREVIOUS frame should be duplicated,
2162  // mostly for variable framerate (VFR)
2163  *nb_frames_prev = 0;
2164  /* by default, we output a single frame */
2165  *nb_frames = 1;
2166 
 // NOTE(review): original lines 2169-2170 (additional conditions of this
 // if, including the opening of an #if block) are missing from this
 // extract.
2167  if (delta0 < 0 &&
2168  delta > 0 &&
2171  && fps->vsync_method != VSYNC_DROP
2172 #endif
2173  ) {
 // Frame arrived slightly before its slot: clip it to the slot start.
2174  if (delta0 < -0.6) {
2175  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2176  } else
2177  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2178  sync_ipts = ofp->next_pts;
2179  duration += delta0;
2180  delta0 = 0;
2181  }
2182 
2183  switch (fps->vsync_method) {
2184  case VSYNC_VSCFR:
2185  if (fps->frame_number == 0 && delta0 >= 0.5) {
2186  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2187  delta = duration;
2188  delta0 = 0;
2189  ofp->next_pts = llrint(sync_ipts);
2190  }
 // fallthrough to CFR handling
2191  case VSYNC_CFR:
2192  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2193  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2194  *nb_frames = 0;
2195  } else if (delta < -1.1)
2196  *nb_frames = 0;
2197  else if (delta > 1.1) {
2198  *nb_frames = llrintf(delta);
2199  if (delta0 > 1.1)
2200  *nb_frames_prev = llrintf(delta0 - 0.6);
2201  }
2202  frame->duration = 1;
2203  break;
2204  case VSYNC_VFR:
2205  if (delta <= -0.6)
2206  *nb_frames = 0;
2207  else if (delta > 0.6)
2208  ofp->next_pts = llrint(sync_ipts);
2209  frame->duration = llrint(duration);
2210  break;
2211 #if FFMPEG_OPT_VSYNC_DROP
2212  case VSYNC_DROP:
2213 #endif
2214  case VSYNC_PASSTHROUGH:
2215  ofp->next_pts = llrint(sync_ipts);
2216  frame->duration = llrint(duration);
2217  break;
2218  default:
2219  av_assert0(0);
2220  }
2221 
2222 finish:
 // Shift the duplication history window and record this decision.
2223  memmove(fps->frames_prev_hist + 1,
2224  fps->frames_prev_hist,
2225  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2226  fps->frames_prev_hist[0] = *nb_frames_prev;
2227 
2228  if (*nb_frames_prev == 0 && fps->last_dropped) {
2229  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2230  av_log(ofp, AV_LOG_VERBOSE,
2231  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2232  fps->frame_number, fps->last_frame->pts);
2233  }
2234  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2235  uint64_t nb_frames_dup;
2236  if (*nb_frames > dts_error_threshold * 30) {
2237  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2238  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2239  *nb_frames = 0;
2240  return;
2241  }
2242  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2243  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2244  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2245  if (nb_frames_dup > fps->dup_warning) {
2246  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2247  fps->dup_warning *= 10;
2248  }
2249  }
2250 
2251  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2252  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2253 }
2254 
// NOTE(review): the signature (original line 2255, presumably
// "static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)")
// is missing from this extract.
// Finish an output: if no frame was ever produced, send one empty,
// parameter-only frame so the encoder can still be initialized, then mark
// the output EOF and signal EOF to the scheduler.
2256 {
 // NOTE(review): original line 2257 (declaration of 'fgp') is missing
 // from this extract.
2258  int ret;
2259 
2260  // we are finished and no frames were ever seen at this output,
2261  // at least initialize the encoder with a dummy frame
2262  if (!fgt->got_frame) {
2263  AVFrame *frame = fgt->frame;
2264  FrameData *fd;
2265 
 // Populate the dummy frame with the negotiated output parameters.
2266  frame->time_base = ofp->tb_out;
2267  frame->format = ofp->format;
2268 
2269  frame->width = ofp->width;
2270  frame->height = ofp->height;
2271  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2272 
2273  frame->sample_rate = ofp->sample_rate;
2274  if (ofp->ch_layout.nb_channels) {
2275  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2276  if (ret < 0)
2277  return ret;
2278  }
2279  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
2280  ret = clone_side_data(&frame->side_data, &frame->nb_side_data,
2281  ofp->side_data, ofp->nb_side_data, 0);
2282  if (ret < 0)
2283  return ret;
2284 
2285  fd = frame_data(frame);
2286  if (!fd)
2287  return AVERROR(ENOMEM);
2288 
2289  fd->frame_rate_filter = ofp->fps.framerate;
2290 
 // The frame intentionally has no data buffers attached.
2291  av_assert0(!frame->buf[0]);
2292 
2293  av_log(ofp, AV_LOG_WARNING,
2294  "No filtered frames for output stream, trying to "
2295  "initialize anyway.\n");
2296 
2297  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2298  if (ret < 0) {
 // NOTE(review): original line 2299 (unreferencing the frame on
 // failure) is missing from this extract.
2300  return ret;
2301  }
2302  }
2303 
2304  fgt->eof_out[ofp->index] = 1;
2305 
 // NULL frame signals EOF to the scheduler; EOF from the scheduler
 // itself is not an error here.
2306  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2307  return (ret == AVERROR_EOF) ? 0 : ret;
2308 }
2309 
// NOTE(review): the first line of the signature (original line 2310,
// presumably "static int fg_output_frame(OutputFilterPriv *ofp,
// FilterGraphThread *fgt,") is missing from this extract.
// Deliver one filtered frame (or NULL at EOF) to the scheduler, applying
// video sync: the frame may be sent zero, one, or several times, possibly
// repeating the previous frame, with pts/duration rewritten to tb_out.
2311  AVFrame *frame)
2312 {
 // NOTE(review): original line 2313 (declaration of 'fgp') is missing
 // from this extract.
2314  AVFrame *frame_prev = ofp->fps.last_frame;
2315  enum AVMediaType type = ofp->ofilter.type;
2316 
2317  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2318 
2319  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2320  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2321 
2322  for (int64_t i = 0; i < nb_frames; i++) {
2323  AVFrame *frame_out;
2324  int ret;
2325 
2326  if (type == AVMEDIA_TYPE_VIDEO) {
 // The first nb_frames_prev iterations re-send the previous frame.
2327  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2328  frame_prev : frame;
2329  if (!frame_in)
2330  break;
2331 
2332  frame_out = fgp->frame_enc;
2333  ret = av_frame_ref(frame_out, frame_in);
2334  if (ret < 0)
2335  return ret;
2336 
2337  frame_out->pts = ofp->next_pts;
2338 
2339  if (ofp->fps.dropped_keyframe) {
2340  frame_out->flags |= AV_FRAME_FLAG_KEY;
2341  ofp->fps.dropped_keyframe = 0;
2342  }
2343  } else {
 // Audio: rescale pts into tb_out; the subtrahend of the rescale
 // (original line 2346) is missing from this extract.
2344  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2345  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2347 
2348  frame->time_base = ofp->tb_out;
2349  frame->duration = av_rescale_q(frame->nb_samples,
2350  (AVRational){ 1, frame->sample_rate },
2351  ofp->tb_out);
2352 
2353  ofp->next_pts = frame->pts + frame->duration;
2354 
2355  frame_out = frame;
2356  }
2357 
2358  // send the frame to consumers
2359  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2360  if (ret < 0) {
2361  av_frame_unref(frame_out);
2362 
2363  if (!fgt->eof_out[ofp->index]) {
2364  fgt->eof_out[ofp->index] = 1;
2365  fgp->nb_outputs_done++;
2366  }
2367 
2368  return ret == AVERROR_EOF ? 0 : ret;
2369  }
2370 
2371  if (type == AVMEDIA_TYPE_VIDEO) {
2372  ofp->fps.frame_number++;
2373  ofp->next_pts++;
2374 
2375  if (i == nb_frames_prev && frame)
2376  frame->flags &= ~AV_FRAME_FLAG_KEY;
2377  }
2378 
2379  fgt->got_frame = 1;
2380  }
2381 
 // Keep the just-sent frame as last_frame for future duplication.
2382  if (frame && frame_prev) {
2383  av_frame_unref(frame_prev);
2384  av_frame_move_ref(frame_prev, frame);
2385  }
2386 
2387  if (!frame)
2388  return close_output(ofp, fgt);
2389 
2390  return 0;
2391 }
2392 
/*
 * Pull a single frame from this output's buffersink and forward it through
 * fg_output_frame(). Returns 0 when a frame was processed, 1 when no more
 * frames are currently available (EAGAIN/EOF), negative on error.
 *
 * NOTE(review): the doxygen rendering omits hyperlinked lines, including
 * the signature (line 2393) and the actual buffersink retrieval call
 * (lines 2401-2402, presumably av_buffersink_get_frame_flags) that sets
 * `ret` tested below — verify against upstream.
 */
2394  AVFrame *frame)
2395 {
2397  AVFilterContext *filter = ofp->filter;
2398  FrameData *fd;
2399  int ret;
2400 
// First EOF on this output: flush the output path with a NULL frame.
2403  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2404  ret = fg_output_frame(ofp, fgt, NULL);
2405  return (ret < 0) ? ret : 1;
2406  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2407  return 1;
2408  } else if (ret < 0) {
2409  av_log(ofp, AV_LOG_WARNING,
2410  "Error in retrieving a frame from the filtergraph: %s\n",
2411  av_err2str(ret));
2412  return ret;
2413  }
2414 
// Output already finished: discard the frame (unref on omitted line 2416).
2415  if (fgt->eof_out[ofp->index]) {
2417  return 0;
2418  }
2419 
2421 
2422  if (debug_ts)
2423  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2424  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2425  frame->time_base.num, frame->time_base.den);
2426 
2427  // Choose the output timebase the first time we get a frame.
2428  if (!ofp->tb_out_locked) {
2429  ret = choose_out_timebase(ofp, frame);
2430  if (ret < 0) {
2431  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2433  return ret;
2434  }
2435  }
2436 
2437  fd = frame_data(frame);
2438  if (!fd) {
2440  return AVERROR(ENOMEM);
2441  }
2442 
2444 
2445  // only use bits_per_raw_sample passed through from the decoder
2446  // if the filtergraph did not touch the frame data
2447  if (!fgp->is_meta)
2448  fd->bits_per_raw_sample = 0;
2449 
2450  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
// Derive a duration from the framerate when the filter did not set one
// (`fr` comes from the omitted line 2452).
2451  if (!frame->duration) {
2453  if (fr.num > 0 && fr.den > 0)
2454  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2455  }
2456 
2457  fd->frame_rate_filter = ofp->fps.framerate;
2458  }
2459 
2460  ret = fg_output_frame(ofp, fgt, frame);
2462  if (ret < 0)
2463  return ret;
2464 
2465  return 0;
2466 }
2467 
2468 /* retrieve all frames available at filtergraph outputs
2469  * and send them to consumers
2469b *
2469b * Returns AVERROR_EOF once every output has finished, 0 otherwise.
2469b * NOTE(review): this doxygen rendering omits hyperlinked lines, including
2469b * the signature (line 2470) and the request-frame call before line 2497 —
2469b * verify against upstream. */
2471  AVFrame *frame)
2472 {
2473  FilterGraphPriv *fgp = fgp_from_fg(fg);
2474  int did_step = 0;
2475 
2476  // graph not configured, just select the input to request
2477  if (!fgt->graph) {
// Pick the first input whose format is still unknown and not at EOF.
2478  for (int i = 0; i < fg->nb_inputs; i++) {
2480  if (ifp->format < 0 && !fgt->eof_in[i]) {
2481  fgt->next_in = i;
2482  return 0;
2483  }
2484  }
2485 
2486  // This state - graph is not configured, but all inputs are either
2487  // initialized or EOF - should be unreachable because sending EOF to a
2488  // filter without even a fallback format should fail
2489  av_assert0(0);
2490  return AVERROR_BUG;
2491  }
2492 
2493  while (fgp->nb_outputs_done < fg->nb_outputs) {
2494  int ret;
2495 
// (Request-frame call omitted by the rendering; sets `ret`.)
2497  if (ret == AVERROR(EAGAIN)) {
// Graph needs more input; let the scheduler know which one.
2498  fgt->next_in = choose_input(fg, fgt);
2499  break;
2500  } else if (ret < 0) {
2501  if (ret == AVERROR_EOF)
2502  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2503  else
2504  av_log(fg, AV_LOG_ERROR,
2505  "Error requesting a frame from the filtergraph: %s\n",
2506  av_err2str(ret));
2507  return ret;
2508  }
2509  fgt->next_in = fg->nb_inputs;
2510 
2511  // return after one iteration, so that scheduler can rate-control us
2512  if (did_step && fgp->have_sources)
2513  return 0;
2514 
2515  /* Reap all buffers present in the buffer sinks */
2516  for (int i = 0; i < fg->nb_outputs; i++) {
2518 
// fg_output_step() returns 0 while frames keep coming, 1 when drained.
2519  ret = 0;
2520  while (!ret) {
2521  ret = fg_output_step(ofp, fgt, frame);
2522  if (ret < 0)
2523  return ret;
2524  }
2525  }
2526  did_step = 1;
2527  }
2528 
2529  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2530 }
2531 
/*
 * Keep the sub2video overlay alive: on a heartbeat timestamp (pts in
 * timebase tb) either re-push the current subtitle picture or, when the
 * displayed subpicture has expired, replace it.
 * NOTE(review): the signature line (2532, static void sub2video_heartbeat)
 * was a hyperlink and is omitted by this rendering.
 */
2533 {
2534  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2535  int64_t pts2;
2536 
2537  /* subtitles seem to be usually muxed ahead of other streams;
2538  if not, subtracting a larger time here is necessary */
2539  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2540 
2541  /* do not send the heartbeat frame if the subtitle is already ahead */
2542  if (pts2 <= ifp->sub2video.last_pts)
2543  return;
2544 
2545  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2546  /* if we have hit the end of the current displayed subpicture,
2547  or if we need to initialize the system, update the
2548  overlayed subpicture and its start/end times */
2549  sub2video_update(ifp, pts2 + 1, NULL);
2550  else
2551  sub2video_push_ref(ifp, pts2);
2552 }
2553 
/*
 * Feed one subtitle frame into the sub2video machinery.
 *  - buffer != 0: the graph is not yet configured; queue the frame for later
 *    (the move of `frame` into `tmp`, original line 2569, is omitted by this
 *    rendering).
 *  - frame with no data (buf[0] == NULL): heartbeat, just refresh timing.
 *  - frame == NULL: EOF; flush the current subpicture and send EOF downstream.
 *  - otherwise: a real subtitle; update the overlay picture.
 */
2554 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2555 {
2556  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2557  int ret;
2558 
2559  if (buffer) {
2560  AVFrame *tmp;
2561 
2562  if (!frame)
2563  return 0;
2564 
2565  tmp = av_frame_alloc();
2566  if (!tmp)
2567  return AVERROR(ENOMEM);
2568 
2570 
2571  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2572  if (ret < 0) {
2573  av_frame_free(&tmp);
2574  return ret;
2575  }
2576 
2577  return 0;
2578  }
2579 
2580  // heartbeat frame
2581  if (frame && !frame->buf[0]) {
2582  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2583  return 0;
2584  }
2585 
2586  if (!frame) {
// EOF: make sure the last subpicture is flushed before signalling EOF
// to the buffer source.
2587  if (ifp->sub2video.end_pts < INT64_MAX)
2588  sub2video_update(ifp, INT64_MAX, NULL);
2589 
2590  return av_buffersrc_add_frame(ifp->filter, NULL);
2591  }
2592 
// Subtitle frames may carry no dimensions; keep the last known ones.
2593  ifp->width = frame->width ? frame->width : ifp->width;
2594  ifp->height = frame->height ? frame->height : ifp->height;
2595 
// The AVSubtitle payload travels inside the frame's first buffer.
2596  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2597 
2598  return 0;
2599 }
2600 
/*
 * Signal EOF on one filtergraph input. If the input's buffersrc exists,
 * forward EOF to it (close call on omitted line 2616). Otherwise the graph
 * was never configured: fill the input parameters from the fallback stream
 * parameters and try configuring the graph now.
 * NOTE(review): several hyperlinked lines (2614, 2616, 2626, 2631, 2636)
 * are omitted by this rendering — verify against upstream.
 */
2601 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2602  int64_t pts, AVRational tb)
2603 {
2604  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2605  int ret;
2606 
// Idempotent: EOF already recorded for this input.
2607  if (fgt->eof_in[ifp->index])
2608  return 0;
2609 
2610  fgt->eof_in[ifp->index] = 1;
2611 
2612  if (ifp->filter) {
// Rescale the EOF timestamp into the input's timebase (rounding flags
// on the omitted line 2614).
2613  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2615 
2617  if (ret < 0)
2618  return ret;
2619  } else {
2620  if (ifp->format < 0) {
2621  // the filtergraph was never configured, use the fallback parameters
2622  ifp->format = ifp->opts.fallback->format;
2623  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2624  ifp->width = ifp->opts.fallback->width;
2625  ifp->height = ifp->opts.fallback->height;
2627  ifp->color_space = ifp->opts.fallback->colorspace;
2628  ifp->color_range = ifp->opts.fallback->color_range;
2629  ifp->time_base = ifp->opts.fallback->time_base;
2630 
// Channel-layout copy call omitted (line 2631); sets `ret`.
2632  &ifp->opts.fallback->ch_layout);
2633  if (ret < 0)
2634  return ret;
2635 
2637  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
2638  ifp->opts.fallback->side_data,
2639  ifp->opts.fallback->nb_side_data, 0);
2640  if (ret < 0)
2641  return ret;
2642 
// With fallback parameters in place, the graph may now be
// configurable if every other input is also known.
2643  if (ifilter_has_all_input_formats(ifilter->graph)) {
2644  ret = configure_filtergraph(ifilter->graph, fgt);
2645  if (ret < 0) {
2646  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
2647  return ret;
2648  }
2649  }
2650  }
2651 
2652  if (ifp->format < 0) {
2653  av_log(ifilter->graph, AV_LOG_ERROR,
2654  "Cannot determine format of input %s after EOF\n",
2655  ifp->opts.name);
2656  return AVERROR_INVALIDDATA;
2657  }
2658  }
2659 
2660  return 0;
2661 }
2662 
/*
 * Bit flags describing which input parameters changed between frames;
 * ORed into send_frame()'s need_reinit to decide whether (and report why)
 * the filtergraph must be reconfigured.
 * NOTE(review): the opening `enum ... {` line (2663) is omitted by this
 * rendering.
 */
2664  VIDEO_CHANGED = (1 << 0),
2665  AUDIO_CHANGED = (1 << 1),
2666  MATRIX_CHANGED = (1 << 2),
2667  DOWNMIX_CHANGED = (1 << 3),
2668  HWACCEL_CHANGED = (1 << 4)
2669 };
2670 
/**
 * Substitute a printable placeholder for a missing string.
 *
 * Used when logging format/colorspace names that libavutil may return as
 * NULL for unrecognized values.
 *
 * @param str string to check; may be NULL
 * @return str itself if non-NULL, otherwise the literal "unknown"
 */
static const char *unknown_if_null(const char *str)
{
    return str ? str : "unknown";
}
2675 
/*
 * Push one decoded frame into a filtergraph input. Detects parameter
 * changes (format, size, layout, display matrix, downmix info, hwaccel
 * context) against the values recorded in ifp; depending on flags either
 * drops the frame, buffers it, or reconfigures the graph before feeding
 * the frame to the buffersrc.
 * NOTE(review): the signature line (2676) and several hyperlinked interior
 * lines (2702, 2709, 2731, 2744, 2760, 2798, 2803-2806, 2808) are omitted
 * by this rendering — verify against upstream.
 */
2677  InputFilter *ifilter, AVFrame *frame)
2678 {
2679  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2680  FrameData *fd;
2681  AVFrameSideData *sd;
2682  int need_reinit = 0, ret;
2683 
2684  /* determine if the parameters for this input changed */
2685  switch (ifp->type) {
2686  case AVMEDIA_TYPE_AUDIO:
2687  if (ifp->format != frame->format ||
2688  ifp->sample_rate != frame->sample_rate ||
2689  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2690  need_reinit |= AUDIO_CHANGED;
2691  break;
2692  case AVMEDIA_TYPE_VIDEO:
2693  if (ifp->format != frame->format ||
2694  ifp->width != frame->width ||
2695  ifp->height != frame->height ||
2696  ifp->color_space != frame->colorspace ||
2697  ifp->color_range != frame->color_range)
2698  need_reinit |= VIDEO_CHANGED;
2699  break;
2700  }
2701 
// (Side-data lookup for the display matrix omitted by the rendering; `sd`
// is set on line 2702 in the original.)
2703  if (!ifp->displaymatrix_present ||
2704  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2705  need_reinit |= MATRIX_CHANGED;
2706  } else if (ifp->displaymatrix_present)
2707  need_reinit |= MATRIX_CHANGED;
2708 
// (Side-data lookup for downmix info omitted, line 2709.)
2710  if (!ifp->downmixinfo_present ||
2711  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
2712  need_reinit |= DOWNMIX_CHANGED;
2713  } else if (ifp->downmixinfo_present)
2714  need_reinit |= DOWNMIX_CHANGED;
2715 
// With -drop_changed: refuse to reinit, count and drop the frame instead.
2716  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
2717  ifp->nb_dropped++;
2718  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
2720  return 0;
2721  }
2722 
// Without -reinit_filter, parameter changes on a configured graph are ignored.
2723  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2724  need_reinit = 0;
2725 
2726  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2727  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2728  need_reinit |= HWACCEL_CHANGED;
2729 
2730  if (need_reinit) {
// (Re-read of input parameters from the frame omitted, line 2731.)
2732  if (ret < 0)
2733  return ret;
2734  }
2735 
2736  /* (re)init the graph if possible, otherwise buffer the frame and return */
2737  if (need_reinit || !fgt->graph) {
2738  AVFrame *tmp = av_frame_alloc();
2739 
2740  if (!tmp)
2741  return AVERROR(ENOMEM);
2742 
// Not all inputs known yet: queue the frame (the move into tmp, line
// 2744, is omitted by the rendering) and wait for more inputs.
2743  if (!ifilter_has_all_input_formats(fg)) {
2745 
2746  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2747  if (ret < 0)
2748  av_frame_free(&tmp);
2749 
2750  return ret;
2751  }
2752 
// Drain the old graph before tearing it down for reconfiguration.
2753  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2754  av_frame_free(&tmp);
2755  if (ret < 0)
2756  return ret;
2757 
2758  if (fgt->graph) {
// Build a human-readable reason string for the reconfiguration log
// (AVBPrint init on omitted line 2760).
2759  AVBPrint reason;
2761  if (need_reinit & AUDIO_CHANGED) {
2762  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2763  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2764  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2765  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2766  }
2767  if (need_reinit & VIDEO_CHANGED) {
2768  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2769  const char *color_space_name = av_color_space_name(frame->colorspace);
2770  const char *color_range_name = av_color_range_name(frame->color_range);
2771  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2772  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2773  unknown_if_null(color_space_name), frame->width, frame->height);
2774  }
2775  if (need_reinit & MATRIX_CHANGED)
2776  av_bprintf(&reason, "display matrix changed, ");
2777  if (need_reinit & DOWNMIX_CHANGED)
// NOTE(review): "medatata" is a typo for "metadata" in this log
// message; fixing it would change a runtime string, so it is only
// flagged here.
2778  av_bprintf(&reason, "downmix medatata changed, ");
2779  if (need_reinit & HWACCEL_CHANGED)
2780  av_bprintf(&reason, "hwaccel changed, ");
2781  if (reason.len > 1)
2782  reason.str[reason.len - 2] = '\0'; // remove last comma
2783  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2784  }
2785 
2786  ret = configure_filtergraph(fg, fgt);
2787  if (ret < 0) {
2788  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2789  return ret;
2790  }
2791  }
2792 
// Rescale frame timing into the buffersrc's timebase before submission.
2793  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2794  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2795  frame->time_base = ifp->time_base;
2796 
2797  if (ifp->displaymatrix_applied)
2799 
2800  fd = frame_data(frame);
2801  if (!fd)
2802  return AVERROR(ENOMEM);
2804 
// (The av_buffersrc_add_frame call is on omitted lines 2805-2806.)
2807  if (ret < 0) {
2809  if (ret != AVERROR_EOF)
2810  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2811  return ret;
2812  }
2813 
2814  return 0;
2815 }
2816 
/*
 * Set the OS-level name of the filtering thread: "?f<name>" for a simple
 * graph (media-type char comes from the omitted line 2823), "fc<index>"
 * for a complex one. The actual set-name call (line 2829) is also omitted
 * by this rendering.
 */
2817 static void fg_thread_set_name(const FilterGraph *fg)
2818 {
2819  char name[16];
2820  if (filtergraph_is_simple(fg)) {
2821  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2822  snprintf(name, sizeof(name), "%cf%s",
2824  ofp->name);
2825  } else {
2826  snprintf(name, sizeof(name), "fc%d", fg->index);
2827  }
2828 
2830 }
2831 
/*
 * Release all per-thread filtering state: drain and free the queued output
 * frames (the FIFO free itself is on omitted line 2838), the scratch frame,
 * the EOF bookkeeping arrays, and the filtergraph, then zero the struct so
 * it can be reused safely.
 * NOTE(review): the signature line (2832) was a hyperlink and is omitted
 * by this rendering.
 */
2833 {
2834  if (fgt->frame_queue_out) {
2835  AVFrame *frame;
2836  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2837  av_frame_free(&frame);
2839  }
2840 
2841  av_frame_free(&fgt->frame);
2842  av_freep(&fgt->eof_in);
2843  av_freep(&fgt->eof_out);
2844 
2845  avfilter_graph_free(&fgt->graph);
2846 
2847  memset(fgt, 0, sizeof(*fgt));
2848 }
2849 
/*
 * Initialize per-thread filtering state for graph fg: a scratch frame,
 * per-input and per-output EOF flag arrays, and the output frame queue
 * (the FIFO allocation itself, line 2866, is omitted by this rendering).
 * On any allocation failure everything is torn down via fg_thread_uninit()
 * and AVERROR(ENOMEM) is returned.
 */
2850 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2851 {
2852  memset(fgt, 0, sizeof(*fgt));
2853 
2854  fgt->frame = av_frame_alloc();
2855  if (!fgt->frame)
2856  goto fail;
2857 
2858  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2859  if (!fgt->eof_in)
2860  goto fail;
2861 
2862  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2863  if (!fgt->eof_out)
2864  goto fail;
2865 
2867  if (!fgt->frame_queue_out)
2868  goto fail;
2869 
2870  return 0;
2871 
2872 fail:
2873  fg_thread_uninit(fgt);
2874  return AVERROR(ENOMEM);
2875 }
2876 
/*
 * Main loop of a filtergraph worker thread. Receives frames, EOFs and
 * control messages from the scheduler, feeds them into the graph, and
 * forwards filtered frames to the consumers via read_frames(). On exit,
 * flushes all outputs that have not yet seen EOF.
 * NOTE(review): hyperlinked lines (e.g. 2892, 2947, 2976, 2988) are
 * omitted by this rendering — verify against upstream.
 */
2877 static int filter_thread(void *arg)
2878 {
2879  FilterGraphPriv *fgp = arg;
2880  FilterGraph *fg = &fgp->fg;
2881 
2882  FilterGraphThread fgt;
2883  int ret = 0, input_status = 0;
2884 
2885  ret = fg_thread_init(&fgt, fg);
2886  if (ret < 0)
2887  goto finish;
2888 
2889  fg_thread_set_name(fg);
2890 
2891  // if we have all input parameters the graph can now be configured
2893  ret = configure_filtergraph(fg, &fgt);
2894  if (ret < 0) {
2895  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2896  av_err2str(ret));
2897  goto finish;
2898  }
2899  }
2900 
2901  while (1) {
2902  InputFilter *ifilter;
2903  InputFilterPriv *ifp = NULL;
2904  enum FrameOpaque o;
2905  unsigned input_idx = fgt.next_in;
2906 
2907  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2908  &input_idx, fgt.frame);
2909  if (input_status == AVERROR_EOF) {
2910  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2911  break;
2912  } else if (input_status == AVERROR(EAGAIN)) {
2913  // should only happen when we didn't request any input
2914  av_assert0(input_idx == fg->nb_inputs);
2915  goto read_frames;
2916  }
2917  av_assert0(input_status >= 0);
2918 
// NOTE(review): this assignment appears twice in the rendering (source
// lines 2919 and 2921); likely a rendering artifact — the duplicate is
// harmless but should be verified against upstream.
2919  o = (intptr_t)fgt.frame->opaque;
2920 
2921  o = (intptr_t)fgt.frame->opaque;
2922 
2923  // message on the control stream
2924  if (input_idx == fg->nb_inputs) {
2925  FilterCommand *fc;
2926 
2927  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2928 
2929  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2930  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2931  fc->all_filters);
2932  av_frame_unref(fgt.frame);
2933  continue;
2934  }
2935 
2936  // we received an input frame or EOF
2937  ifilter = fg->inputs[input_idx];
2938  ifp = ifp_from_ifilter(ifilter);
2939 
2940  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2941  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2942  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2943  !fgt.graph);
2944  } else if (fgt.frame->buf[0]) {
2945  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2946  } else {
// Empty frame == EOF marker for this input (assert on omitted line 2947).
2948  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2949  }
2950  av_frame_unref(fgt.frame);
2951  if (ret == AVERROR_EOF) {
2952  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2953  input_idx);
2954  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2955  continue;
2956  }
2957  if (ret < 0)
2958  goto finish;
2959 
2960 read_frames:
2961  // retrieve all newly available frames
2962  ret = read_frames(fg, &fgt, fgt.frame);
2963  if (ret == AVERROR_EOF) {
2964  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
2965  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
2966  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
2967  break;
2968  } else if (ret < 0) {
2969  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
2970  av_err2str(ret));
2971  goto finish;
2972  }
2973  }
2974 
// Flush every output that has not been closed yet.
2975  for (unsigned i = 0; i < fg->nb_outputs; i++) {
2977 
2978  if (fgt.eof_out[i] || !fgt.graph)
2979  continue;
2980 
2981  ret = fg_output_frame(ofp, &fgt, NULL);
2982  if (ret < 0)
2983  goto finish;
2984  }
2985 
2986 finish:
2987 
2989  print_filtergraph(fg, fgt.graph);
2990 
2991  // EOF is normal termination
2992  if (ret == AVERROR_EOF)
2993  ret = 0;
2994 
2995  fg_thread_uninit(&fgt);
2996 
2997  return ret;
2998 }
2999 
3000 void fg_send_command(FilterGraph *fg, double time, const char *target,
3001  const char *command, const char *arg, int all_filters)
3002 {
3003  FilterGraphPriv *fgp = fgp_from_fg(fg);
3004  AVBufferRef *buf;
3005  FilterCommand *fc;
3006 
3007  fc = av_mallocz(sizeof(*fc));
3008  if (!fc)
3009  return;
3010 
3011  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3012  if (!buf) {
3013  av_freep(&fc);
3014  return;
3015  }
3016 
3017  fc->target = av_strdup(target);
3018  fc->command = av_strdup(command);
3019  fc->arg = av_strdup(arg);
3020  if (!fc->target || !fc->command || !fc->arg) {
3021  av_buffer_unref(&buf);
3022  return;
3023  }
3024 
3025  fc->time = time;
3026  fc->all_filters = all_filters;
3027 
3028  fgp->frame->buf[0] = buf;
3029  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3030 
3031  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3032 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.h:107
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2075
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1684
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:60
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored automatically.
Definition: avfilter.h:462
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:661
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:354
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:628
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:360
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.h:99
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2460
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:358
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:244
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.h:185
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:106
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2080
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1988
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1479
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.h:61
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:254
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.h:65
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:62
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:608
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.h:118
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:982
FrameData
Definition: ffmpeg.h:658
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1963
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.h:139
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:285
OutputFilter::apad
char * apad
Definition: ffmpeg.h:368
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:661
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.h:89
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:380
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:142
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:283
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:608
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1895
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1011
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:458
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2668
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1011
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.h:120
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1494
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:72
AVSubtitleRect
Definition: avcodec.h:2048
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2079
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1752
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:810
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.h:159
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:548
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:672
InputFile::index
int index
Definition: ffmpeg.h:474
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:742
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:512
AVFrame::width
int width
Definition: frame.h:482
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.h:44
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.h:51
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:615
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2310
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:185
FilterGraph::index
int index
Definition: ffmpeg.h:378
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.h:117
data
const char data[16]
Definition: mxf.c:149
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.h:163
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.h:222
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1694
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:381
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2664
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:128
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:654
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:246
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:262
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1412
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:587
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2393
FilterGraphPriv
Definition: ffmpeg_filter.h:40
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:65
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1729
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.h:181
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1002
InputStream
Definition: ffmpeg.h:437
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:303
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:270
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:291
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3717
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.h:175
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2832
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:303
fail
#define fail()
Definition: checkasm.h:196
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.h:183
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:142
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:961
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.h:195
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:89
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:428
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.h:178
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:753
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1515
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1710
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:52
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:151
FrameData::tb
AVRational tb
Definition: ffmpeg.h:668
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.h:207
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.h:190
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.h:164
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:365
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:271
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:821
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2532
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.h:196
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.h:219
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:671
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.h:123
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2601
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.h:82
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1071
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:334
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:782
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:918
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:78
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:491
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.h:63
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:2667
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.h:92
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:382
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:452
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:117
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:546
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:366
InputFilter
Definition: ffmpeg.h:352
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.h:54
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:493
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:300
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:80
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2081
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2850
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:273
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:282
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:353
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:279
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.h:224
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:867
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.h:213
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.h:189
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:284
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3657
AVFormatContext
Format I/O context.
Definition: avformat.h:1265
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:639
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:768
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:361
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1295
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.h:218
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.h:114
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:575
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.h:191
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:850
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.h:161
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:445
tmp
static uint8_t tmp[20]
Definition: aes_ctr.c:47
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.h:129
Decoder
Definition: ffmpeg.h:423
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:861
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:299
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:919
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1019
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.h:96
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:584
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:725
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.h:205
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:466
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2255
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:57
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.h:66
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:675
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1426
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:942
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1226
AVFilterGraph
Definition: avfilter.h:582
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.h:133
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.h:113
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:716
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:383
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.h:147
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.h:212
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:453
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:105
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.h:41
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.h:58
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.h:176
FilterGraph
Definition: ffmpeg.h:376
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:931
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1497
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:287
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:739
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:273
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:587
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1957
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:263
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2877
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.h:127
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:49
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.h:131
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.h:216
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:50
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:736
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.h:229
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.h:203
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:183
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.h:115
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2666
FilterCommand::time
double time
Definition: ffmpeg_filter.c:74
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.h:143
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.h:130
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1343
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:527
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.h:52
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:457
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2078
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:66
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.h:56
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1469
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.h:166
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:727
InputFilterPriv::width
int width
Definition: ffmpeg_filter.h:112
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:500
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1703
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1227
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2665
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2395
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2671
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:288
decoders
Decoder ** decoders
Definition: ffmpeg.c:114
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.h:180
nb_decoders
int nb_decoders
Definition: ffmpeg.c:115
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:370
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2470
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2031
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2676
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:954
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:367
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.h:193
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:75
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.h:171
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.h:155
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:359
FPSConvContext
Definition: ffmpeg_filter.h:152
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
InputFilterPriv::index
int index
Definition: ffmpeg_filter.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:673
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:131
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3000
downmix_info.h
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.h:46
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:293
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:204
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1633
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:673
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.h:169
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:79
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:476
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:525
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:383
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:266
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:607
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:676
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:111
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:308
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.h:192
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:919
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2505
AVFilter
Filter definition.
Definition: avfilter.h:211
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2132
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:881
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:91
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:745
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:354
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.h:153
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:490
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1307
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.h:223
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:902
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2663
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:507
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:987
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.h:208
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:482
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:751
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:265
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:199
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:440
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.h:105
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:913
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:441
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.h:125
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:917
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.h:102
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.h:49
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:704
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1256
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:264
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:178
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:745
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2012
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.h:214
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.h:226
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:313
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:111
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.h:122
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:943
AVFilterContext
An instance of a filter.
Definition: avfilter.h:269
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:377
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:357
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.h:106
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:449
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2554
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.h:83
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1333
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:129
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:315
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.h:215
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.h:72
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2089
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:373
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:82
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:443
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2439
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:70
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:60
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:874
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:297
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:426
InputFilterPriv::format
int format
Definition: ffmpeg_filter.h:110
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.h:140
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:112
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:158
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.h:188
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1313
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.h:85
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.h:220
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.h:134
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1209
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:585
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:272
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.h:77
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:715
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:463
ffmpeg_filter.h
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.h:200
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:731
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.h:189
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.h:168
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2817
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:165
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1504
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2077
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:71
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:69
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.h:112
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3261
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:372
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:280
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.h:170