FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/mem.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
40 
41 // FIXME private header, used for mid_pred()
42 #include "libavcodec/mathops.h"
43 
44 typedef struct FilterGraphPriv {
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  // source filters are present in the graph
57 
58  unsigned nb_outputs_done;
59 
60  const char *graph_desc;
61 
62  char *nb_threads;
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
78 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
79 {
80  return (const FilterGraphPriv*)fg;
81 }
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
104 typedef struct InputFilterPriv {
106 
108 
109  int index;
110 
112 
113  // used to hold submitted input
115 
116  /* for filters that are not yet bound to an input stream,
117  * this stores the input linklabel, if any */
118  uint8_t *linklabel;
119 
120  // filter data type
122  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
123  // same as type otherwise
125 
126  int eof;
127  int bound;
128 
129  // parameters configured for this input
130  int format;
131 
132  int width, height;
136 
139 
141 
143 
145 
149 
150  struct {
151  AVFrame *frame;
152 
155 
156  ///< marks if sub2video_update should force an initialization
157  unsigned int initialize;
158  } sub2video;
160 
162 {
163  return (InputFilterPriv*)ifilter;
164 }
165 
166 typedef struct FPSConvContext {
168  /* number of frames emitted by the video-encoding sync code */
170  /* history of nb_frames_prev, i.e. the number of times the
171  * previous frame was duplicated by vsync code in recent
172  * do_video_out() calls */
174 
175  uint64_t dup_warning;
176 
179 
181 
187 
188 typedef struct OutputFilterPriv {
190 
191  int index;
192 
193  void *log_parent;
194  char log_name[32];
195 
196  char *name;
197 
199 
200  /* desired output stream properties */
201  int format;
202  int width, height;
207 
208  // time base in which the output is sent to our downstream
209  // does not need to match the filtersink's timebase
211  // at least one frame with the above timebase was sent
212  // to our downstream, so it cannot change anymore
214 
216 
219 
220  // those are only set if no format is specified and the encoder gives us multiple options
221  // They point directly to the relevant lists of the encoder.
222  const int *formats;
224  const int *sample_rates;
227 
231  // offset for output timestamps, in AV_TIME_BASE_Q
235 
236  unsigned flags;
238 
240 {
241  return (OutputFilterPriv*)ofilter;
242 }
243 
244 typedef struct FilterCommand {
245  char *target;
246  char *command;
247  char *arg;
248 
249  double time;
251 } FilterCommand;
252 
253 static void filter_command_free(void *opaque, uint8_t *data)
254 {
256 
257  av_freep(&fc->target);
258  av_freep(&fc->command);
259  av_freep(&fc->arg);
260 
261  av_free(data);
262 }
263 
265 {
266  AVFrame *frame = ifp->sub2video.frame;
267  int ret;
268 
270 
271  frame->width = ifp->width;
272  frame->height = ifp->height;
273  frame->format = ifp->format;
274  frame->colorspace = ifp->color_space;
275  frame->color_range = ifp->color_range;
276 
278  if (ret < 0)
279  return ret;
280 
281  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
282 
283  return 0;
284 }
285 
286 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
287  AVSubtitleRect *r)
288 {
289  uint32_t *pal, *dst2;
290  uint8_t *src, *src2;
291  int x, y;
292 
293  if (r->type != SUBTITLE_BITMAP) {
294  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
295  return;
296  }
297  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
298  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
299  r->x, r->y, r->w, r->h, w, h
300  );
301  return;
302  }
303 
304  dst += r->y * dst_linesize + r->x * 4;
305  src = r->data[0];
306  pal = (uint32_t *)r->data[1];
307  for (y = 0; y < r->h; y++) {
308  dst2 = (uint32_t *)dst;
309  src2 = src;
310  for (x = 0; x < r->w; x++)
311  *(dst2++) = pal[*(src2++)];
312  dst += dst_linesize;
313  src += r->linesize[0];
314  }
315 }
316 
318 {
319  AVFrame *frame = ifp->sub2video.frame;
320  int ret;
321 
322  av_assert1(frame->data[0]);
323  ifp->sub2video.last_pts = frame->pts = pts;
327  if (ret != AVERROR_EOF && ret < 0)
328  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
329  av_err2str(ret));
330 }
331 
332 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
333  const AVSubtitle *sub)
334 {
335  AVFrame *frame = ifp->sub2video.frame;
336  int8_t *dst;
337  int dst_linesize;
338  int num_rects;
339  int64_t pts, end_pts;
340 
341  if (sub) {
342  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
343  AV_TIME_BASE_Q, ifp->time_base);
344  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
345  AV_TIME_BASE_Q, ifp->time_base);
346  num_rects = sub->num_rects;
347  } else {
348  /* If we are initializing the system, utilize current heartbeat
349  PTS as the start time, and show until the following subpicture
350  is received. Otherwise, utilize the previous subpicture's end time
351  as the fall-back value. */
352  pts = ifp->sub2video.initialize ?
353  heartbeat_pts : ifp->sub2video.end_pts;
354  end_pts = INT64_MAX;
355  num_rects = 0;
356  }
357  if (sub2video_get_blank_frame(ifp) < 0) {
359  "Impossible to get a blank canvas.\n");
360  return;
361  }
362  dst = frame->data [0];
363  dst_linesize = frame->linesize[0];
364  for (int i = 0; i < num_rects; i++)
365  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
366  sub2video_push_ref(ifp, pts);
367  ifp->sub2video.end_pts = end_pts;
368  ifp->sub2video.initialize = 0;
369 }
370 
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header.
 *
 * The generated choose_<name>() appends "name=value:" when a single value
 * is configured (ofp->var != none), or "name=v0|v1|...:" enumerating
 * ofp->supported_list (a 'none'-terminated array) otherwise. If neither
 * a value nor a list is set, nothing is appended at all. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
        \
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}
392 
395 
398 
400  "%d", )
401 
402 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
404 
405 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
407 
408 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
409 {
410  if (av_channel_layout_check(&ofp->ch_layout)) {
411  av_bprintf(bprint, "channel_layouts=");
412  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
413  } else if (ofp->ch_layouts) {
414  const AVChannelLayout *p;
415 
416  av_bprintf(bprint, "channel_layouts=");
417  for (p = ofp->ch_layouts; p->nb_channels; p++) {
419  av_bprintf(bprint, "|");
420  }
421  if (bprint->len > 0)
422  bprint->str[--bprint->len] = '\0';
423  } else
424  return;
425  av_bprint_chars(bprint, ':', 1);
426 }
427 
428 static int read_binary(const char *path, uint8_t **data, int *len)
429 {
430  AVIOContext *io = NULL;
431  int64_t fsize;
432  int ret;
433 
434  *data = NULL;
435  *len = 0;
436 
437  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
438  if (ret < 0) {
439  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
440  path, av_err2str(ret));
441  return ret;
442  }
443 
444  fsize = avio_size(io);
445  if (fsize < 0 || fsize > INT_MAX) {
446  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
447  ret = AVERROR(EIO);
448  goto fail;
449  }
450 
451  *data = av_malloc(fsize);
452  if (!*data) {
453  ret = AVERROR(ENOMEM);
454  goto fail;
455  }
456 
457  ret = avio_read(io, *data, fsize);
458  if (ret != fsize) {
459  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
460  ret = ret < 0 ? ret : AVERROR(EIO);
461  goto fail;
462  }
463 
464  *len = fsize;
465 
466  ret = 0;
467 fail:
468  avio_close(io);
469  if (ret < 0) {
470  av_freep(data);
471  *len = 0;
472  }
473  return ret;
474 }
475 
476 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
477 {
478  const AVOption *o = NULL;
479  int ret;
480 
482  if (ret >= 0)
483  return 0;
484 
485  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
487  if (!o)
488  goto err_apply;
489 
490  // key is a valid option name prefixed with '/'
491  // interpret value as a path from which to load the actual option value
492  key++;
493 
494  if (o->type == AV_OPT_TYPE_BINARY) {
495  uint8_t *data;
496  int len;
497 
498  ret = read_binary(val, &data, &len);
499  if (ret < 0)
500  goto err_load;
501 
503  av_freep(&data);
504  } else {
505  char *data = file_read(val);
506  if (!data) {
507  ret = AVERROR(EIO);
508  goto err_load;
509  }
510 
512  av_freep(&data);
513  }
514  if (ret < 0)
515  goto err_apply;
516 
517  return 0;
518 
519 err_apply:
521  "Error applying option '%s' to filter '%s': %s\n",
522  key, f->filter->name, av_err2str(ret));
523  return ret;
524 err_load:
526  "Error loading value for option '%s' from file '%s'\n",
527  key, val);
528  return ret;
529 }
530 
532 {
533  for (size_t i = 0; i < seg->nb_chains; i++) {
534  AVFilterChain *ch = seg->chains[i];
535 
536  for (size_t j = 0; j < ch->nb_filters; j++) {
537  AVFilterParams *p = ch->filters[j];
538  const AVDictionaryEntry *e = NULL;
539 
540  av_assert0(p->filter);
541 
542  while ((e = av_dict_iterate(p->opts, e))) {
543  int ret = filter_opt_apply(p->filter, e->key, e->value);
544  if (ret < 0)
545  return ret;
546  }
547 
548  av_dict_free(&p->opts);
549  }
550  }
551 
552  return 0;
553 }
554 
555 static int graph_parse(AVFilterGraph *graph, const char *desc,
557  AVBufferRef *hw_device)
558 {
560  int ret;
561 
562  *inputs = NULL;
563  *outputs = NULL;
564 
565  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
566  if (ret < 0)
567  return ret;
568 
570  if (ret < 0)
571  goto fail;
572 
573  if (hw_device) {
574  for (int i = 0; i < graph->nb_filters; i++) {
575  AVFilterContext *f = graph->filters[i];
576 
577  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
578  continue;
579  f->hw_device_ctx = av_buffer_ref(hw_device);
580  if (!f->hw_device_ctx) {
581  ret = AVERROR(ENOMEM);
582  goto fail;
583  }
584  }
585  }
586 
587  ret = graph_opts_apply(seg);
588  if (ret < 0)
589  goto fail;
590 
592 
593 fail:
595  return ret;
596 }
597 
598 // Filters can be configured only if the formats of all inputs are known.
600 {
601  for (int i = 0; i < fg->nb_inputs; i++) {
603  if (ifp->format < 0)
604  return 0;
605  }
606  return 1;
607 }
608 
609 static int filter_thread(void *arg);
610 
611 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
612 {
613  AVFilterContext *ctx = inout->filter_ctx;
614  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
615  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
616 
617  if (nb_pads > 1)
618  return av_strdup(ctx->filter->name);
619  return av_asprintf("%s:%s", ctx->filter->name,
620  avfilter_pad_get_name(pads, inout->pad_idx));
621 }
622 
623 static const char *ofilter_item_name(void *obj)
624 {
625  OutputFilterPriv *ofp = obj;
626  return ofp->log_name;
627 }
628 
// AVClass used for logging on behalf of an OutputFilter(Priv): messages are
// tagged with ofilter_item_name() (the log_name field) and can be chained to
// the parent context stored in OutputFilterPriv.log_parent.
static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
636 
638 {
639  OutputFilterPriv *ofp;
640  OutputFilter *ofilter;
641 
642  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
643  if (!ofp)
644  return NULL;
645 
646  ofilter = &ofp->ofilter;
647  ofilter->class = &ofilter_class;
648  ofp->log_parent = fg;
649  ofilter->graph = fg;
650  ofilter->type = type;
651  ofp->format = -1;
654  ofp->index = fg->nb_outputs - 1;
655 
656  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
658 
659  return ofilter;
660 }
661 
662 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
663 {
664  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
665  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
666  int ret, dec_idx;
667 
668  av_assert0(!ifp->bound);
669  ifp->bound = 1;
670 
671  if (ifp->type != ist->par->codec_type &&
673  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
675  return AVERROR(EINVAL);
676  }
677 
678  ifp->type_src = ist->st->codecpar->codec_type;
679 
680  ifp->opts.fallback = av_frame_alloc();
681  if (!ifp->opts.fallback)
682  return AVERROR(ENOMEM);
683 
684  dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
685  &ifp->opts);
686  if (dec_idx < 0)
687  return dec_idx;
688 
689  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
690  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
691  if (ret < 0)
692  return ret;
693 
694  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
695  ifp->sub2video.frame = av_frame_alloc();
696  if (!ifp->sub2video.frame)
697  return AVERROR(ENOMEM);
698 
699  ifp->width = ifp->opts.sub2video_width;
700  ifp->height = ifp->opts.sub2video_height;
701 
702  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
703  palettes for all rectangles are identical or compatible */
704  ifp->format = AV_PIX_FMT_RGB32;
705 
706  ifp->time_base = AV_TIME_BASE_Q;
707 
708  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
709  ifp->width, ifp->height);
710  }
711 
712  return 0;
713 }
714 
716 {
718  int ret, dec_idx;
719 
720  av_assert0(!ifp->bound);
721  ifp->bound = 1;
722 
723  if (ifp->type != dec->type) {
724  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
726  return AVERROR(EINVAL);
727  }
728 
729  ifp->type_src = ifp->type;
730 
731  dec_idx = dec_filter_add(dec, &ifp->ifilter, &ifp->opts);
732  if (dec_idx < 0)
733  return dec_idx;
734 
735  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
736  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
737  if (ret < 0)
738  return ret;
739 
740  return 0;
741 }
742 
743 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
744  const AVChannelLayout *layout_requested)
745 {
746  int i, err;
747 
748  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
749  /* Pass the layout through for all orders but UNSPEC */
750  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
751  if (err < 0)
752  return err;
753  return 0;
754  }
755 
756  /* Requested layout is of order UNSPEC */
757  if (!layouts_allowed) {
758  /* Use the default native layout for the requested amount of channels when the
759  encoder doesn't have a list of supported layouts */
760  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
761  return 0;
762  }
763  /* Encoder has a list of supported layouts. Pick the first layout in it with the
764  same amount of channels as the requested layout */
765  for (i = 0; layouts_allowed[i].nb_channels; i++) {
766  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
767  break;
768  }
769  if (layouts_allowed[i].nb_channels) {
770  /* Use it if one is found */
771  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
772  if (err < 0)
773  return err;
774  return 0;
775  }
776  /* If no layout for the amount of channels requested was found, use the default
777  native layout for it. */
778  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
779 
780  return 0;
781 }
782 
784  unsigned sched_idx_enc,
785  const OutputFilterOptions *opts)
786 {
787  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
788  FilterGraph *fg = ofilter->graph;
789  FilterGraphPriv *fgp = fgp_from_fg(fg);
790  int ret;
791 
792  av_assert0(!ofilter->bound);
793  av_assert0(ofilter->type == ost->type);
794 
795  ofilter->bound = 1;
796  av_freep(&ofilter->linklabel);
797 
798  ofp->flags = opts->flags;
799  ofp->ts_offset = opts->ts_offset;
800  ofp->enc_timebase = opts->output_tb;
801 
802  ofp->trim_start_us = opts->trim_start_us;
803  ofp->trim_duration_us = opts->trim_duration_us;
804 
805  ofp->name = av_strdup(opts->name);
806  if (!ofp->name)
807  return AVERROR(EINVAL);
808 
809  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
810  if (ret < 0)
811  return ret;
812 
813  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
814  if (ret < 0)
815  return ret;
816 
817  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
818  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
819 
820  if (fgp->is_simple) {
821  // for simple filtergraph there is just one output,
822  // so use only graph-level information for logging
823  ofp->log_parent = NULL;
824  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
825  } else
826  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
827 
828  switch (ofilter->type) {
829  case AVMEDIA_TYPE_VIDEO:
830  ofp->width = opts->width;
831  ofp->height = opts->height;
832  if (opts->format != AV_PIX_FMT_NONE) {
833  ofp->format = opts->format;
834  } else
835  ofp->formats = opts->formats;
836 
837  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
838  ofp->color_space = opts->color_space;
839  else
840  ofp->color_spaces = opts->color_spaces;
841 
842  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
843  ofp->color_range = opts->color_range;
844  else
845  ofp->color_ranges = opts->color_ranges;
846 
848 
849  ofp->fps.last_frame = av_frame_alloc();
850  if (!ofp->fps.last_frame)
851  return AVERROR(ENOMEM);
852 
853  ofp->fps.vsync_method = opts->vsync_method;
854  ofp->fps.framerate = ost->frame_rate;
855  ofp->fps.framerate_max = ost->max_frame_rate;
856  ofp->fps.framerate_supported = ost->force_fps || !opts->enc ?
857  NULL : opts->frame_rates;
858 
859  // reduce frame rate for mpeg4 to be within the spec limits
860  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
861  ofp->fps.framerate_clip = 65535;
862 
863  ofp->fps.dup_warning = 1000;
864 
865  break;
866  case AVMEDIA_TYPE_AUDIO:
867  if (opts->format != AV_SAMPLE_FMT_NONE) {
868  ofp->format = opts->format;
869  } else {
870  ofp->formats = opts->formats;
871  }
872  if (opts->sample_rate) {
873  ofp->sample_rate = opts->sample_rate;
874  } else
875  ofp->sample_rates = opts->sample_rates;
876  if (opts->ch_layout.nb_channels) {
877  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
878  if (ret < 0)
879  return ret;
880  } else {
881  ofp->ch_layouts = opts->ch_layouts;
882  }
883  break;
884  }
885 
886  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
887  SCH_ENC(sched_idx_enc));
888  if (ret < 0)
889  return ret;
890 
891  return 0;
892 }
893 
895  const OutputFilterOptions *opts)
896 {
897  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
898 
899  av_assert0(!ofilter->bound);
900  av_assert0(ofilter->type == ifp->type);
901 
902  ofilter->bound = 1;
903  av_freep(&ofilter->linklabel);
904 
905  ofp->name = av_strdup(opts->name);
906  if (!ofp->name)
907  return AVERROR(EINVAL);
908 
909  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
910 
911  return 0;
912 }
913 
914 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
915 {
917  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
919  char name[32];
920  int ret;
921 
922  av_assert0(!ifp->bound);
923  ifp->bound = 1;
924 
925  if (ifp->type != ofilter_src->type) {
926  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
927  av_get_media_type_string(ofilter_src->type),
929  return AVERROR(EINVAL);
930  }
931 
932  ifp->type_src = ifp->type;
933 
934  memset(&opts, 0, sizeof(opts));
935 
936  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
937  opts.name = name;
938 
939  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
940  if (ret < 0)
941  return ret;
942 
943  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
944  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
945  if (ret < 0)
946  return ret;
947 
948  return 0;
949 }
950 
952 {
953  InputFilterPriv *ifp;
954  InputFilter *ifilter;
955 
956  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
957  if (!ifp)
958  return NULL;
959 
960  ifilter = &ifp->ifilter;
961  ifilter->graph = fg;
962 
963  ifp->frame = av_frame_alloc();
964  if (!ifp->frame)
965  return NULL;
966 
967  ifp->index = fg->nb_inputs - 1;
968  ifp->format = -1;
971 
973  if (!ifp->frame_queue)
974  return NULL;
975 
976  return ifilter;
977 }
978 
979 void fg_free(FilterGraph **pfg)
980 {
981  FilterGraph *fg = *pfg;
982  FilterGraphPriv *fgp;
983 
984  if (!fg)
985  return;
986  fgp = fgp_from_fg(fg);
987 
988  for (int j = 0; j < fg->nb_inputs; j++) {
989  InputFilter *ifilter = fg->inputs[j];
990  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
991 
992  if (ifp->frame_queue) {
993  AVFrame *frame;
994  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
997  }
999 
1000  av_frame_free(&ifp->frame);
1001  av_frame_free(&ifp->opts.fallback);
1002 
1004  av_freep(&ifp->linklabel);
1005  av_freep(&ifp->opts.name);
1006  av_freep(&ifilter->name);
1007  av_freep(&fg->inputs[j]);
1008  }
1009  av_freep(&fg->inputs);
1010  for (int j = 0; j < fg->nb_outputs; j++) {
1011  OutputFilter *ofilter = fg->outputs[j];
1012  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1013 
1014  av_frame_free(&ofp->fps.last_frame);
1015  av_dict_free(&ofp->sws_opts);
1016  av_dict_free(&ofp->swr_opts);
1017 
1018  av_freep(&ofilter->linklabel);
1019  av_freep(&ofilter->name);
1020  av_freep(&ofilter->apad);
1021  av_freep(&ofp->name);
1023  av_freep(&fg->outputs[j]);
1024  }
1025  av_freep(&fg->outputs);
1026  av_freep(&fgp->graph_desc);
1027  av_freep(&fgp->nb_threads);
1028 
1029  av_frame_free(&fgp->frame);
1030  av_frame_free(&fgp->frame_enc);
1031 
1032  av_freep(pfg);
1033 }
1034 
1035 static const char *fg_item_name(void *obj)
1036 {
1037  const FilterGraphPriv *fgp = obj;
1038 
1039  return fgp->log_name;
1040 }
1041 
// AVClass used for logging on behalf of a FilterGraph(Priv): messages are
// tagged with fg_item_name() (the graph's log_name field).
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
1048 
1049 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1050 {
1051  FilterGraphPriv *fgp;
1052  FilterGraph *fg;
1053 
1055  AVFilterGraph *graph;
1056  int ret = 0;
1057 
1058  fgp = av_mallocz(sizeof(*fgp));
1059  if (!fgp)
1060  return AVERROR(ENOMEM);
1061  fg = &fgp->fg;
1062 
1063  if (pfg) {
1064  *pfg = fg;
1065  fg->index = -1;
1066  } else {
1068  if (ret < 0) {
1069  av_freep(&fgp);
1070  return ret;
1071  }
1072 
1073  fg->index = nb_filtergraphs - 1;
1074  }
1075 
1076  fg->class = &fg_class;
1077  fgp->graph_desc = graph_desc;
1079  fgp->sch = sch;
1080 
1081  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1082 
1083  fgp->frame = av_frame_alloc();
1084  fgp->frame_enc = av_frame_alloc();
1085  if (!fgp->frame || !fgp->frame_enc)
1086  return AVERROR(ENOMEM);
1087 
1088  /* this graph is only used for determining the kinds of inputs
1089  * and outputs we have, and is discarded on exit from this function */
1090  graph = avfilter_graph_alloc();
1091  if (!graph)
1092  return AVERROR(ENOMEM);;
1093  graph->nb_threads = 1;
1094 
1095  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
1096  if (ret < 0)
1097  goto fail;
1098 
1099  for (unsigned i = 0; i < graph->nb_filters; i++) {
1100  const AVFilter *f = graph->filters[i]->filter;
1101  if (!avfilter_filter_pad_count(f, 0) &&
1102  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
1103  fgp->have_sources = 1;
1104  break;
1105  }
1106  }
1107 
1108  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1109  InputFilter *const ifilter = ifilter_alloc(fg);
1110  InputFilterPriv *ifp;
1111 
1112  if (!ifilter) {
1113  ret = AVERROR(ENOMEM);
1114  goto fail;
1115  }
1116 
1117  ifp = ifp_from_ifilter(ifilter);
1118  ifp->linklabel = cur->name;
1119  cur->name = NULL;
1120 
1121  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1122  cur->pad_idx);
1123 
1124  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1125  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1126  "currently.\n");
1127  ret = AVERROR(ENOSYS);
1128  goto fail;
1129  }
1130 
1131  ifilter->name = describe_filter_link(fg, cur, 1);
1132  if (!ifilter->name) {
1133  ret = AVERROR(ENOMEM);
1134  goto fail;
1135  }
1136  }
1137 
1138  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1139  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1140  cur->pad_idx);
1141  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1142 
1143  if (!ofilter) {
1144  ret = AVERROR(ENOMEM);
1145  goto fail;
1146  }
1147 
1148  ofilter->linklabel = cur->name;
1149  cur->name = NULL;
1150 
1151  ofilter->name = describe_filter_link(fg, cur, 0);
1152  if (!ofilter->name) {
1153  ret = AVERROR(ENOMEM);
1154  goto fail;
1155  }
1156  }
1157 
1158  if (!fg->nb_outputs) {
1159  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1160  ret = AVERROR(ENOSYS);
1161  goto fail;
1162  }
1163 
1164  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1165  filter_thread, fgp);
1166  if (ret < 0)
1167  goto fail;
1168  fgp->sch_idx = ret;
1169 
1170 fail:
1173  avfilter_graph_free(&graph);
1174 
1175  if (ret < 0)
1176  return ret;
1177 
1178  return 0;
1179 }
1180 
1182  char *graph_desc,
1183  Scheduler *sch, unsigned sched_idx_enc,
1184  const OutputFilterOptions *opts)
1185 {
1186  FilterGraph *fg;
1187  FilterGraphPriv *fgp;
1188  int ret;
1189 
1190  ret = fg_create(&ost->fg_simple, graph_desc, sch);
1191  if (ret < 0)
1192  return ret;
1193  fg = ost->fg_simple;
1194  fgp = fgp_from_fg(fg);
1195 
1196  fgp->is_simple = 1;
1197 
1198  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1199  av_get_media_type_string(ost->type)[0], opts->name);
1200 
1201  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1202  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1203  "to have exactly 1 input and 1 output. "
1204  "However, it had %d input(s) and %d output(s). Please adjust, "
1205  "or use a complex filtergraph (-filter_complex) instead.\n",
1206  graph_desc, fg->nb_inputs, fg->nb_outputs);
1207  return AVERROR(EINVAL);
1208  }
1209  if (fg->outputs[0]->type != ost->type) {
1210  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1211  "it to %s output stream\n",
1213  av_get_media_type_string(ost->type));
1214  return AVERROR(EINVAL);
1215  }
1216 
1217  ost->filter = fg->outputs[0];
1218 
1219  ret = ifilter_bind_ist(fg->inputs[0], ist);
1220  if (ret < 0)
1221  return ret;
1222 
1223  ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc, opts);
1224  if (ret < 0)
1225  return ret;
1226 
1227  if (opts->nb_threads) {
1228  av_freep(&fgp->nb_threads);
1229  fgp->nb_threads = av_strdup(opts->nb_threads);
1230  if (!fgp->nb_threads)
1231  return AVERROR(ENOMEM);
1232  }
1233 
1234  return 0;
1235 }
1236 
1238 {
1239  FilterGraphPriv *fgp = fgp_from_fg(fg);
1240  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1241  InputStream *ist = NULL;
1242  enum AVMediaType type = ifp->type;
1243  int i, ret;
1244 
1245  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1246  // bind to a standalone decoder
1247  int dec_idx;
1248 
1249  dec_idx = strtol(ifp->linklabel + 4, NULL, 0);
1250  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1251  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1252  dec_idx, fgp->graph_desc);
1253  return AVERROR(EINVAL);
1254  }
1255 
1256  ret = ifilter_bind_dec(ifp, decoders[dec_idx]);
1257  if (ret < 0)
1258  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1259  ifilter->name);
1260  return ret;
1261  } else if (ifp->linklabel) {
1262  AVFormatContext *s;
1263  AVStream *st = NULL;
1264  char *p;
1265  int file_idx;
1266 
1267  // try finding an unbound filtergraph output with this label
1268  for (int i = 0; i < nb_filtergraphs; i++) {
1269  FilterGraph *fg_src = filtergraphs[i];
1270 
1271  if (fg == fg_src)
1272  continue;
1273 
1274  for (int j = 0; j < fg_src->nb_outputs; j++) {
1275  OutputFilter *ofilter = fg_src->outputs[j];
1276 
1277  if (!ofilter->bound && ofilter->linklabel &&
1278  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1279  av_log(fg, AV_LOG_VERBOSE,
1280  "Binding input with label '%s' to filtergraph output %d:%d\n",
1281  ifp->linklabel, i, j);
1282 
1283  ret = ifilter_bind_fg(ifp, fg_src, j);
1284  if (ret < 0)
1285  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1286  ifp->linklabel);
1287  return ret;
1288  }
1289  }
1290  }
1291 
1292  // bind to an explicitly specified demuxer stream
1293  file_idx = strtol(ifp->linklabel, &p, 0);
1294  if (file_idx < 0 || file_idx >= nb_input_files) {
1295  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1296  file_idx, fgp->graph_desc);
1297  return AVERROR(EINVAL);
1298  }
1299  s = input_files[file_idx]->ctx;
1300 
1301  for (i = 0; i < s->nb_streams; i++) {
1302  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1303  if (stream_type != type &&
1304  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1305  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1306  continue;
1307  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1308  st = s->streams[i];
1309  break;
1310  }
1311  }
1312  if (!st) {
1313  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1314  "matches no streams.\n", p, fgp->graph_desc);
1315  return AVERROR(EINVAL);
1316  }
1317  ist = input_files[file_idx]->streams[st->index];
1318 
1319  av_log(fg, AV_LOG_VERBOSE,
1320  "Binding input with label '%s' to input stream %d:%d\n",
1321  ifp->linklabel, ist->file->index, ist->index);
1322  } else {
1323  ist = ist_find_unused(type);
1324  if (!ist) {
1325  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1326  "unlabeled input pad %s\n", ifilter->name);
1327  return AVERROR(EINVAL);
1328  }
1329 
1330  av_log(fg, AV_LOG_VERBOSE,
1331  "Binding unlabeled input %d to input stream %d:%d\n",
1332  ifp->index, ist->file->index, ist->index);
1333  }
1334  av_assert0(ist);
1335 
1336  ret = ifilter_bind_ist(ifilter, ist);
1337  if (ret < 0) {
1338  av_log(fg, AV_LOG_ERROR,
1339  "Error binding an input stream to complex filtergraph input %s.\n",
1340  ifilter->name);
1341  return ret;
1342  }
1343 
1344  return 0;
1345 }
1346 
// Walk all inputs of the given filtergraph and bind each not-yet-bound one
// to its source (input stream, decoder, or another graph's output) via
// fg_complex_bind_input().  Returns 0 on success, a negative AVERROR otherwise.
1347 static int bind_inputs(FilterGraph *fg)
1348 {
1349  // bind filtergraph inputs to input streams or other filtergraphs
1350  for (int i = 0; i < fg->nb_inputs; i++) {
// NOTE(review): extraction dropped original line 1351 here — the declaration
// of 'ifp' (InputFilterPriv for fg->inputs[i]); restore from upstream.
1352  int ret;
1353 
1354  if (ifp->bound)
1355  continue;
1356 
1357  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1358  if (ret < 0)
1359  return ret;
1360  }
1361 
1362  return 0;
1363 }
1364 
// Finalise all filtergraph bindings: bind the inputs of every filtergraph,
// then verify that every filtergraph output was connected to a consumer.
// NOTE(review): the signature line (presumably 'int fg_finalise_bindings(void)')
// and the bind_inputs() call on line 1370 were dropped by extraction.
1366 {
1367  int ret;
1368 
1369  for (int i = 0; i < nb_filtergraphs; i++) {
1371  if (ret < 0)
1372  return ret;
1373  }
1374 
1375  // check that all outputs were bound
1376  for (int i = 0; i < nb_filtergraphs; i++) {
1377  FilterGraph *fg = filtergraphs[i];
1378 
1379  for (int j = 0; j < fg->nb_outputs; j++) {
1380  OutputFilter *output = fg->outputs[j];
1381  if (!output->bound) {
// An unconnected output is a hard error — line 1382 (the av_log call head)
// was dropped by extraction.
1383  "Filter %s has an unconnected output\n", output->name);
1384  return AVERROR(EINVAL);
1385  }
1386  }
1387  }
1388 
1389  return 0;
1390 }
1391 
// Append a trim (video) or atrim (audio) filter after *last_filter to enforce
// -ss/-t style output limits.  No-op when neither a duration nor a start time
// is set.  On success *last_filter/*pad_idx are updated to the trim output.
// NOTE(review): the opening signature line
// 'static int insert_trim(int64_t start_time, int64_t duration,' was dropped
// by extraction, as were the AV_OPT_SEARCH_CHILDREN argument lines and the
// avfilter_init_str(ctx, NULL) call (line 1430).
1393  AVFilterContext **last_filter, int *pad_idx,
1394  const char *filter_name)
1395 {
1396  AVFilterGraph *graph = (*last_filter)->graph;
1398  const AVFilter *trim;
// Pick trim vs atrim based on the media type of the pad being capped.
1399  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1400  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1401  int ret = 0;
1402 
1403  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1404  return 0;
1405 
1406  trim = avfilter_get_by_name(name);
1407  if (!trim) {
1408  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1409  "recording time.\n", name);
1410  return AVERROR_FILTER_NOT_FOUND;
1411  }
1412 
1413  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1414  if (!ctx)
1415  return AVERROR(ENOMEM);
1416 
// "durationi"/"starti" are the integer (AV_TIME_BASE) variants of the
// trim filter options.
1417  if (duration != INT64_MAX) {
1418  ret = av_opt_set_int(ctx, "durationi", duration,
1420  }
1421  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1422  ret = av_opt_set_int(ctx, "starti", start_time,
1424  }
1425  if (ret < 0) {
1426  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1427  return ret;
1428  }
1429 
1431  if (ret < 0)
1432  return ret;
1433 
1434  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1435  if (ret < 0)
1436  return ret;
1437 
1438  *last_filter = ctx;
1439  *pad_idx = 0;
1440  return 0;
1441 }
1442 
// Create a named filter with the given args, link it after *last_filter, and
// advance *last_filter/*pad_idx to its output.  Returns 0 or negative AVERROR.
1443 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1444  const char *filter_name, const char *args)
1445 {
1446  AVFilterGraph *graph = (*last_filter)->graph;
// NOTE(review): the 'AVFilterContext *ctx;' declaration (line 1447) and the
// head of the avfilter_graph_create_filter(&ctx, ...) call (line 1450) were
// dropped by extraction.
1448  int ret;
1449 
1451  avfilter_get_by_name(filter_name),
1452  filter_name, args, NULL, graph);
1453  if (ret < 0)
1454  return ret;
1455 
1456  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1457  if (ret < 0)
1458  return ret;
1459 
1460  *last_filter = ctx;
1461  *pad_idx = 0;
1462  return 0;
1463 }
1464 
// Terminate one video output of the graph: attach a buffersink, optionally an
// auto-scaler (when a fixed output size is requested), a format-constraining
// filter, and a trim filter, then link the chain into ofp->filter.
// NOTE(review): the signature line
// 'static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph,'
// and several call-head lines were dropped by extraction.
1466  OutputFilter *ofilter, AVFilterInOut *out)
1467 {
1468  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1469  AVFilterContext *last_filter = out->filter_ctx;
1470  AVBPrint bprint;
1471  int pad_idx = out->pad_idx;
1472  int ret;
1473  char name[255];
1474 
1475  snprintf(name, sizeof(name), "out_%s", ofp->name);
1477  avfilter_get_by_name("buffersink"),
1478  name, NULL, NULL, graph);
1479 
1480  if (ret < 0)
1481  return ret;
1482 
// Insert an explicit scaler only when the user requested a fixed size AND
// autoscaling is enabled for this output.
1483  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1484  char args[255];
1486  const AVDictionaryEntry *e = NULL;
1487 
1488  snprintf(args, sizeof(args), "%d:%d",
1489  ofp->width, ofp->height);
1490 
// Forward all user-supplied sws options to the scale filter.
1491  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1492  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1493  }
1494 
1495  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
1497  name, args, NULL, graph)) < 0)
1498  return ret;
1499  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1500  return ret;
1501 
1502  last_filter = filter;
1503  pad_idx = 0;
1504  }
1505 
// Build the constraint string (pixel formats / color spaces / ranges) and,
// if non-empty, pin them with a 'format' filter so reconfiguration cannot
// change the negotiated formats.
1507  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1509  choose_pix_fmts(ofp, &bprint);
1510  choose_color_spaces(ofp, &bprint);
1511  choose_color_ranges(ofp, &bprint);
1512  if (!av_bprint_is_complete(&bprint))
1513  return AVERROR(ENOMEM);
1514 
1515  if (bprint.len) {
1517 
1519  avfilter_get_by_name("format"),
1520  "format", bprint.str, NULL, graph);
1521  av_bprint_finalize(&bprint, NULL);
1522  if (ret < 0)
1523  return ret;
1524  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1525  return ret;
1526 
1527  last_filter = filter;
1528  pad_idx = 0;
1529  }
1530 
// Apply output -ss/-t limits via an automatically inserted trim filter.
1531  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1533  &last_filter, &pad_idx, name);
1534  if (ret < 0)
1535  return ret;
1536 
1537 
1538  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1539  return ret;
1540 
1541  return 0;
1542 }
1543 
// Terminate one audio output of the graph: attach an abuffersink (accepting
// any channel count), constrain sample formats/rates/layouts via 'aformat',
// optionally insert 'apad', add a trim filter, and link into ofp->filter.
// NOTE(review): the signature line
// 'static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph,'
// and several call-head lines were dropped by extraction.
1545  OutputFilter *ofilter, AVFilterInOut *out)
1546 {
1547  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1548  AVFilterContext *last_filter = out->filter_ctx;
1549  int pad_idx = out->pad_idx;
1550  AVBPrint args;
1551  char name[255];
1552  int ret;
1553 
1554  snprintf(name, sizeof(name), "out_%s", ofp->name);
1556  avfilter_get_by_name("abuffersink"),
1557  name, NULL, NULL, graph);
1558  if (ret < 0)
1559  return ret;
1560  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1561  return ret;
1562 
// Helper: create a filter from an ffmpeg CLI option and splice it after
// last_filter; jumps to fail on any error.
1563 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1564  AVFilterContext *filt_ctx; \
1565  \
1566  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1567  "similarly to -af " filter_name "=%s.\n", arg); \
1568  \
1569  ret = avfilter_graph_create_filter(&filt_ctx, \
1570  avfilter_get_by_name(filter_name), \
1571  filter_name, arg, NULL, graph); \
1572  if (ret < 0) \
1573  goto fail; \
1574  \
1575  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1576  if (ret < 0) \
1577  goto fail; \
1578  \
1579  last_filter = filt_ctx; \
1580  pad_idx = 0; \
1581 } while (0)
1583 
1584  choose_sample_fmts(ofp, &args);
1585  choose_sample_rates(ofp, &args);
1586  choose_channel_layouts(ofp, &args);
1587  if (!av_bprint_is_complete(&args)) {
1588  ret = AVERROR(ENOMEM);
1589  goto fail;
1590  }
1591  if (args.len) {
1593 
1594  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1596  avfilter_get_by_name("aformat"),
1597  name, args.str, NULL, graph);
1598  if (ret < 0)
1599  goto fail;
1600 
1601  ret = avfilter_link(last_filter, pad_idx, format, 0);
1602  if (ret < 0)
1603  goto fail;
1604 
1605  last_filter = format;
1606  pad_idx = 0;
1607  }
1608 
1609  if (ofilter->apad)
1610  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1611 
1612  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1614  &last_filter, &pad_idx, name);
1615  if (ret < 0)
1616  goto fail;
1617 
1618  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1619  goto fail;
// Fall through: args must be finalized on both success and failure paths.
1620 fail:
1621  av_bprint_finalize(&args, NULL);
1622 
1623  return ret;
1624 }
1625 
// Dispatch output configuration by media type; only video and audio outputs
// are valid here (anything else is a programming error, hence the assert).
// NOTE(review): the signature line
// 'static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph,'
// was dropped by extraction.
1627  OutputFilter *ofilter, AVFilterInOut *out)
1628 {
1629  switch (ofilter->type) {
1630  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1631  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1632  default: av_assert0(0); return 0;
1633  }
1634 }
1635 
// Reset the sub2video state for an input filter so the subtitle-to-video
// conversion restarts cleanly on the next heartbeat.
// NOTE(review): the signature line (presumably
// 'static void sub2video_prepare(InputFilterPriv *ifp)') was dropped by
// extraction.
1637 {
1638  ifp->sub2video.last_pts = INT64_MIN;
1639  ifp->sub2video.end_pts = INT64_MIN;
1640 
1641  /* sub2video structure has been (re-)initialized.
1642  Mark it as such so that the system will be
1643  initialized with the first received heartbeat. */
1644  ifp->sub2video.initialize = 1;
1645 }
1646 
// Set up the source chain for one video input: create a 'buffer' source with
// the stream's geometry/format/timebase parameters, then optionally insert
// crop, autorotation (transpose/hflip/vflip/rotate from the display matrix),
// and trim filters before linking into the parsed graph input.
// NOTE(review): the signature line
// 'static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph,'
// and several lines (par allocation, av_bprint_init, desc lookup) were dropped
// by extraction.
1648  InputFilter *ifilter, AVFilterInOut *in)
1649 {
1650  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1651 
1652  AVFilterContext *last_filter;
1653  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1654  const AVPixFmtDescriptor *desc;
1655  AVRational fr = ifp->opts.framerate;
1656  AVRational sar;
1657  AVBPrint args;
1658  char name[255];
1659  int ret, pad_idx = 0;
1661  if (!par)
1662  return AVERROR(ENOMEM);
1663 
// Subtitle streams feeding a video input use the sub2video path.
1664  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1665  sub2video_prepare(ifp);
1666 
// Normalize an unset aspect ratio to 0/1 so the args string stays valid.
1667  sar = ifp->sample_aspect_ratio;
1668  if(!sar.den)
1669  sar = (AVRational){0,1};
1671  av_bprintf(&args,
1672  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1673  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1674  ifp->width, ifp->height, ifp->format,
1675  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1676  ifp->color_space, ifp->color_range);
1677  if (fr.num && fr.den)
1678  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1679  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1680  ifp->opts.name);
1681 
1682 
1683  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1684  args.str, NULL, graph)) < 0)
1685  goto fail;
// Hardware frames context must be passed via buffersrc parameters, not args.
1686  par->hw_frames_ctx = ifp->hw_frames_ctx;
1687  ret = av_buffersrc_parameters_set(ifp->filter, par);
1688  if (ret < 0)
1689  goto fail;
1690  av_freep(&par);
1691  last_filter = ifp->filter;
1692 
1694  av_assert0(desc);
1695 
1696  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1697  char crop_buf[64];
1698  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1699  ifp->opts.crop_left, ifp->opts.crop_right,
1700  ifp->opts.crop_top, ifp->opts.crop_bottom,
1701  ifp->opts.crop_left, ifp->opts.crop_top);
1702  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1703  if (ret < 0)
1704  return ret;
1705  }
1706 
1707  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1708  ifp->displaymatrix_applied = 0;
// Autorotate only software frames; hwaccel formats cannot be rotated here.
1709  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1710  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1711  int32_t *displaymatrix = ifp->displaymatrix;
1712  double theta;
1713 
1714  theta = get_rotation(displaymatrix);
1715 
// Map the rotation angle to the cheapest equivalent filter chain:
// 90/270 -> transpose (with flip variants), 180 -> hflip+vflip,
// arbitrary angles -> rotate, ~0 with mirrored matrix -> vflip.
1716  if (fabs(theta - 90) < 1.0) {
1717  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1718  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1719  } else if (fabs(theta - 180) < 1.0) {
1720  if (displaymatrix[0] < 0) {
1721  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1722  if (ret < 0)
1723  return ret;
1724  }
1725  if (displaymatrix[4] < 0) {
1726  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1727  }
1728  } else if (fabs(theta - 270) < 1.0) {
1729  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1730  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1731  } else if (fabs(theta) > 1.0) {
1732  char rotate_buf[64];
1733  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1734  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1735  } else if (fabs(theta) < 1.0) {
1736  if (displaymatrix && displaymatrix[4] < 0) {
1737  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1738  }
1739  }
1740  if (ret < 0)
1741  return ret;
1742 
1743  ifp->displaymatrix_applied = 1;
1744  }
1745 
1746  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1748  &last_filter, &pad_idx, name);
1749  if (ret < 0)
1750  return ret;
1751 
1752  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1753  return ret;
1754  return 0;
1755 fail:
1756  av_freep(&par);
1757 
1758  return ret;
1759 }
1760 
// Set up the source chain for one audio input: create an 'abuffer' source
// with timebase/rate/format/layout parameters, add a trim filter, and link
// into the parsed graph input.
// NOTE(review): the signature line
// 'static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph,'
// and a few lines (av_bprint_init, sample-format name lookup, layout describe)
// were dropped by extraction.
1762  InputFilter *ifilter, AVFilterInOut *in)
1763 {
1764  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1765  AVFilterContext *last_filter;
1766  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1767  AVBPrint args;
1768  char name[255];
1769  int ret, pad_idx = 0;
1770 
1772  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1773  ifp->time_base.num, ifp->time_base.den,
1774  ifp->sample_rate,
// Prefer a named channel layout when one is known; fall back to a bare
// channel count otherwise.
1776  if (av_channel_layout_check(&ifp->ch_layout) &&
1778  av_bprintf(&args, ":channel_layout=");
1780  } else
1781  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1782  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1783 
1784  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1785  name, args.str, NULL,
1786  graph)) < 0)
1787  return ret;
1788  last_filter = ifp->filter;
1789 
1790  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1792  &last_filter, &pad_idx, name);
1793  if (ret < 0)
1794  return ret;
1795 
1796  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1797  return ret;
1798 
1799  return 0;
1800 }
1801 
// Dispatch input configuration by media type; only video and audio inputs
// are valid here.
// NOTE(review): the signature line
// 'static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph,'
// was dropped by extraction.
1803  InputFilter *ifilter, AVFilterInOut *in)
1804 {
1805  switch (ifp_from_ifilter(ifilter)->type) {
1806  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1807  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1808  default: av_assert0(0); return 0;
1809  }
1810 }
1811 
// Tear down a configured graph: clear the per-input/per-output filter context
// pointers (they are owned by the graph) and free the AVFilterGraph itself.
// NOTE(review): the signature line (presumably
// 'static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)')
// and the output-loop body line 1815 were dropped by extraction.
1813 {
1814  for (int i = 0; i < fg->nb_outputs; i++)
1816  for (int i = 0; i < fg->nb_inputs; i++)
1817  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1818  avfilter_graph_free(&fgt->graph);
1819 }
1820 
// True when the filter context is a data source we feed ourselves: a
// 'buffer' or 'abuffer' filter with no inputs.
// NOTE(review): the signature line (presumably
// 'static int filter_is_buffersrc(const AVFilterContext *f)') was dropped by
// extraction.
1822 {
1823  return f->nb_inputs == 0 &&
1824  (!strcmp(f->filter->name, "buffer") ||
1825  !strcmp(f->filter->name, "abuffer"));
1826 }
1827 
// Returns 1 when every filter in the graph is metadata-only, a sink, or one
// of our own buffersources — i.e. the graph cannot modify frame data; used to
// decide whether decoder bits_per_raw_sample can be passed through.
1828 static int graph_is_meta(AVFilterGraph *graph)
1829 {
1830  for (unsigned i = 0; i < graph->nb_filters; i++) {
1831  const AVFilterContext *f = graph->filters[i];
1832 
1833  /* in addition to filters flagged as meta, also
1834  * disregard sinks and buffersources (but not other sources,
1835  * since they introduce data we are not aware of)
1836  */
1837  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1838  f->nb_outputs == 0 ||
// NOTE(review): extraction dropped line 1839 here — the
// 'filter_is_buffersrc(f)))' term closing this condition.
1840  return 0;
1841  }
1842  return 1;
1843 }
1844 
1845 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1846 
// (Re-)configure the whole filtergraph: allocate a fresh AVFilterGraph, apply
// thread/sws/swr options (simple graphs only), parse the graph description,
// configure every input and output chain, then flush any queued frames and
// propagate EOF for inputs that already finished.
// NOTE(review): the signature line (presumably
// 'static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)')
// and numerous call-head lines were dropped by extraction.
1848 {
1849  FilterGraphPriv *fgp = fgp_from_fg(fg);
1850  AVBufferRef *hw_device;
1851  AVFilterInOut *inputs, *outputs, *cur;
1852  int ret, i, simple = filtergraph_is_simple(fg);
1853  int have_input_eof = 0;
1854  const char *graph_desc = fgp->graph_desc;
1855 
// Start from a clean slate — drop any previously configured graph.
1856  cleanup_filtergraph(fg, fgt);
1857  fgt->graph = avfilter_graph_alloc();
1858  if (!fgt->graph)
1859  return AVERROR(ENOMEM);
1860 
1861  if (simple) {
1862  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1863 
// Global -filter_threads takes precedence over the per-graph setting.
1864  if (filter_nbthreads) {
1865  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1866  if (ret < 0)
1867  goto fail;
1868  } else if (fgp->nb_threads) {
1869  ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
1870  if (ret < 0)
1871  return ret;
1872  }
1873 
1874  if (av_dict_count(ofp->sws_opts)) {
1876  &fgt->graph->scale_sws_opts,
1877  '=', ':');
1878  if (ret < 0)
1879  goto fail;
1880  }
1881 
1882  if (av_dict_count(ofp->swr_opts)) {
1883  char *args;
1884  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1885  if (ret < 0)
1886  goto fail;
1887  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1888  av_free(args);
1889  }
1890  } else {
1892  }
1893 
1894  hw_device = hw_device_for_filter();
1895 
1896  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1897  goto fail;
1898 
// Parsed unlinked pads come back in declaration order, matching fg->inputs /
// fg->outputs element-for-element.
1899  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1900  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1903  goto fail;
1904  }
1906 
1907  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1908  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1909  if (ret < 0) {
1911  goto fail;
1912  }
1913  }
1915 
1916  if (fgp->disable_conversions)
1918  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1919  goto fail;
1920 
1921  fgp->is_meta = graph_is_meta(fgt->graph);
1922 
1923  /* limit the lists of allowed formats to the ones selected, to
1924  * make sure they stay the same if the filtergraph is reconfigured later */
1925  for (int i = 0; i < fg->nb_outputs; i++) {
1926  OutputFilter *ofilter = fg->outputs[i];
1927  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1928  AVFilterContext *sink = ofp->filter;
1929 
1930  ofp->format = av_buffersink_get_format(sink);
1931 
1932  ofp->width = av_buffersink_get_w(sink);
1933  ofp->height = av_buffersink_get_h(sink);
1936 
1937  // If the timing parameters are not locked yet, get the tentative values
1938  // here but don't lock them. They will only be used if no output frames
1939  // are ever produced.
1940  if (!ofp->tb_out_locked) {
1942  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1943  fr.num > 0 && fr.den > 0)
1944  ofp->fps.framerate = fr;
1945  ofp->tb_out = av_buffersink_get_time_base(sink);
1946  }
1948 
1951  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1952  if (ret < 0)
1953  goto fail;
1954  }
1955 
// Feed any frames that were queued while the graph was not configured.
1956  for (int i = 0; i < fg->nb_inputs; i++) {
1958  AVFrame *tmp;
1959  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1960  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1961  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1962  } else {
1964  }
1965  av_frame_free(&tmp);
1966  if (ret < 0)
1967  goto fail;
1968  }
1969  }
1970 
1971  /* send the EOFs for the finished inputs */
1972  for (int i = 0; i < fg->nb_inputs; i++) {
1974  if (fgt->eof_in[i]) {
1976  if (ret < 0)
1977  goto fail;
1978  have_input_eof = 1;
1979  }
1980  }
1981 
1982  if (have_input_eof) {
1983  // make sure the EOF propagates to the end of the graph
1985  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1986  goto fail;
1987  }
1988 
1989  return 0;
1990 fail:
1991  cleanup_filtergraph(fg, fgt);
1992  return ret;
1993 }
1994 
// Capture the stream parameters of an incoming frame (format, geometry,
// timebase, color properties, audio layout, hw frames context, display
// matrix) into the input filter, for use when configuring the buffersrc.
// NOTE(review): the signature line (presumably
// 'static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)')
// and the side-data lookup head (line 2022) were dropped by extraction.
1996 {
1997  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1998  AVFrameSideData *sd;
1999  int ret;
2000 
2001  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2002  if (ret < 0)
2003  return ret;
2004 
// Audio uses 1/sample_rate; CFR video uses the inverse of the forced
// framerate; everything else takes the frame's own timebase.
2005  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2006  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2007  frame->time_base;
2008 
2009  ifp->format = frame->format;
2010 
2011  ifp->width = frame->width;
2012  ifp->height = frame->height;
2013  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2014  ifp->color_space = frame->colorspace;
2015  ifp->color_range = frame->color_range;
2016 
2017  ifp->sample_rate = frame->sample_rate;
2018  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2019  if (ret < 0)
2020  return ret;
2021 
2023  if (sd)
2024  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2025  ifp->displaymatrix_present = !!sd;
2026 
2027  return 0;
2028 }
2029 
// True for "simple" graphs (implied by -vf/-af, one input one output) as
// opposed to complex graphs from -filter_complex.
// NOTE(review): the signature line (presumably
// 'int filtergraph_is_simple(const FilterGraph *fg)') was dropped by
// extraction.
2031 {
2032  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2033  return fgp->is_simple;
2034 }
2035 
2036 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2037  double time, const char *target,
2038  const char *command, const char *arg, int all_filters)
2039 {
2040  int ret;
2041 
2042  if (!graph)
2043  return;
2044 
2045  if (time < 0) {
2046  char response[4096];
2047  ret = avfilter_graph_send_command(graph, target, command, arg,
2048  response, sizeof(response),
2049  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2050  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2051  fg->index, ret, response);
2052  } else if (!all_filters) {
2053  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2054  } else {
2055  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2056  if (ret < 0)
2057  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2058  }
2059 }
2060 
2061 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2062 {
2063  int nb_requests, nb_requests_max = -1;
2064  int best_input = -1;
2065 
2066  for (int i = 0; i < fg->nb_inputs; i++) {
2067  InputFilter *ifilter = fg->inputs[i];
2068  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2069 
2070  if (fgt->eof_in[i])
2071  continue;
2072 
2073  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2074  if (nb_requests > nb_requests_max) {
2075  nb_requests_max = nb_requests;
2076  best_input = i;
2077  }
2078  }
2079 
2080  av_assert0(best_input >= 0);
2081 
2082  return best_input;
2083 }
2084 
// Choose and lock the output timebase (and, for video, the target framerate)
// for this output, honoring -enc_time_base and vsync/framerate constraints.
// NOTE(review): the signature line (presumably
// 'static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)')
// and the buffersink framerate query (line 2117) were dropped by extraction.
2086 {
2087  OutputFilter *ofilter = &ofp->ofilter;
2088  FPSConvContext *fps = &ofp->fps;
2089  AVRational tb = (AVRational){ 0, 0 };
2090  AVRational fr;
2091  const FrameData *fd;
2092 
2093  fd = frame_data_c(frame);
2094 
2095  // apply -enc_time_base
2096  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2097  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2098  av_log(ofp, AV_LOG_ERROR,
2099  "Demuxing timebase not available - cannot use it for encoding\n");
2100  return AVERROR(EINVAL);
2101  }
2102 
2103  switch (ofp->enc_timebase.num) {
2104  case 0: break;
2105  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2106  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2107  default: tb = ofp->enc_timebase; break;
2108  }
2109 
// Audio: default the timebase to 1/sample_rate and skip the video
// framerate logic entirely.
2110  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2111  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2112  goto finish;
2113  }
2114 
2115  fr = fps->framerate;
2116  if (!fr.num) {
2118  if (fr_sink.num > 0 && fr_sink.den > 0)
2119  fr = fr_sink;
2120  }
2121 
// CFR modes need a concrete framerate; fall back to 25fps when nothing
// usable is known, and clamp to -fpsmax when set.
2122  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2123  if (!fr.num && !fps->framerate_max.num) {
2124  fr = (AVRational){25, 1};
2125  av_log(ofp, AV_LOG_WARNING,
2126  "No information "
2127  "about the input framerate is available. Falling "
2128  "back to a default value of 25fps. Use the -r option "
2129  "if you want a different framerate.\n");
2130  }
2131 
2132  if (fps->framerate_max.num &&
2133  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2134  !fr.den))
2135  fr = fps->framerate_max;
2136  }
2137 
2138  if (fr.num > 0) {
// Snap to the nearest framerate the encoder supports, if it advertises a
// restricted list.
2139  if (fps->framerate_supported) {
2140  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2141  fr = fps->framerate_supported[idx];
2142  }
2143  if (fps->framerate_clip) {
2144  av_reduce(&fr.num, &fr.den,
2145  fr.num, fr.den, fps->framerate_clip);
2146  }
2147  }
2148 
// Prefer 1/framerate as timebase; otherwise keep the frame's own.
2149  if (!(tb.num > 0 && tb.den > 0))
2150  tb = av_inv_q(fr);
2151  if (!(tb.num > 0 && tb.den > 0))
2152  tb = frame->time_base;
2153 
2154  fps->framerate = fr;
2155 finish:
2156  ofp->tb_out = tb;
2157  ofp->tb_out_locked = 1;
2158 
2159  return 0;
2160 }
2161 
// Rescale the frame's pts from the filter timebase to the destination
// (encoder) timebase and return the same timestamp as a higher-precision
// double (used by the fps conversion code).
// NOTE(review): the signature line (presumably
// 'static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)')
// and the subtraction-continuation lines (2176b, 2185) were dropped by
// extraction.
2164 {
2165  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2166 
2167  AVRational tb = tb_dst;
2168  AVRational filter_tb = frame->time_base;
// Extra fractional bits give float_pts more precision than the integer pts.
2169  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2170 
2171  if (frame->pts == AV_NOPTS_VALUE)
2172  goto early_exit;
2173 
2174  tb.den <<= extra_bits;
2175  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2177  float_pts /= 1 << extra_bits;
2178  // when float_pts is not exactly an integer,
2179  // avoid exact midpoints to reduce the chance of rounding differences, this
2180  // can be removed in case the fps code is changed to work with integers
2181  if (float_pts != llrint(float_pts))
2182  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2183 
2184  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2186  frame->time_base = tb_dst;
2187 
2188 early_exit:
2189 
2190  if (debug_ts) {
2191  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2192  frame ? av_ts2str(frame->pts) : "NULL",
2193  av_ts2timestr(frame->pts, &tb_dst),
2194  float_pts, tb_dst.num, tb_dst.den);
2195  }
2196 
2197  return float_pts;
2198 }
2199 
2200 /* Convert frame timestamps to the encoder timebase and decide how many times
2201  * should this (and possibly previous) frame be repeated in order to conform to
2202  * desired target framerate (if any).
2203  */
// Core video framerate-conversion decision: given the next filtered frame (or
// NULL at EOF), compute how many times the current and previous frames must
// be emitted to satisfy the selected vsync method, updating drop/dup counters.
// NOTE(review): the signature line
// 'static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,'
// and the '#if FFMPEG_OPT_VSYNC_DROP' guard lines were dropped by extraction.
2204  int64_t *nb_frames, int64_t *nb_frames_prev)
2205 {
2206  OutputFilter *ofilter = &ofp->ofilter;
2207  FPSConvContext *fps = &ofp->fps;
2208  double delta0, delta, sync_ipts, duration;
2209 
// EOF: estimate how often to repeat the last frame from recent history
// (median of the last three duplication counts).
2211  if (!frame) {
2212  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2213  fps->frames_prev_hist[1],
2214  fps->frames_prev_hist[2]);
2215 
2216  if (!*nb_frames && fps->last_dropped) {
2217  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2218  fps->last_dropped++;
2219  }
2220 
2221  goto finish;
2222  }
2223 
2224  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2225 
2226  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2227  /* delta0 is the "drift" between the input frame and
2228  * where it would fall in the output. */
2229  delta0 = sync_ipts - ofp->next_pts;
2230  delta = delta0 + duration;
2231 
2232  // tracks the number of times the PREVIOUS frame should be duplicated,
2233  // mostly for variable framerate (VFR)
2234  *nb_frames_prev = 0;
2235  /* by default, we output a single frame */
2236  *nb_frames = 1;
2237 
// A small negative drift with positive total delta means the frame starts
// slightly early but still overlaps its slot — clip it instead of dropping.
2238  if (delta0 < 0 &&
2239  delta > 0 &&
2242  && fps->vsync_method != VSYNC_DROP
2243 #endif
2244  ) {
2245  if (delta0 < -0.6) {
2246  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2247  } else
2248  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2249  sync_ipts = ofp->next_pts;
2250  duration += delta0;
2251  delta0 = 0;
2252  }
2253 
2254  switch (fps->vsync_method) {
2255  case VSYNC_VSCFR:
// VSCFR: skip initial padding, then fall through to CFR handling.
2256  if (fps->frame_number == 0 && delta0 >= 0.5) {
2257  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2258  delta = duration;
2259  delta0 = 0;
2260  ofp->next_pts = llrint(sync_ipts);
2261  }
2262  case VSYNC_CFR:
2263  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2264  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2265  *nb_frames = 0;
2266  } else if (delta < -1.1)
2267  *nb_frames = 0;
2268  else if (delta > 1.1) {
2269  *nb_frames = llrintf(delta);
2270  if (delta0 > 1.1)
2271  *nb_frames_prev = llrintf(delta0 - 0.6);
2272  }
2273  frame->duration = 1;
2274  break;
2275  case VSYNC_VFR:
2276  if (delta <= -0.6)
2277  *nb_frames = 0;
2278  else if (delta > 0.6)
2279  ofp->next_pts = llrint(sync_ipts);
2280  frame->duration = llrint(duration);
2281  break;
2282 #if FFMPEG_OPT_VSYNC_DROP
2283  case VSYNC_DROP:
2284 #endif
2285  case VSYNC_PASSTHROUGH:
2286  ofp->next_pts = llrint(sync_ipts);
2287  frame->duration = llrint(duration);
2288  break;
2289  default:
2290  av_assert0(0);
2291  }
2292 
2293 finish:
// Shift the duplication history window and record this decision.
2294  memmove(fps->frames_prev_hist + 1,
2295  fps->frames_prev_hist,
2296  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2297  fps->frames_prev_hist[0] = *nb_frames_prev;
2298 
2299  if (*nb_frames_prev == 0 && fps->last_dropped) {
2300  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2301  av_log(ofp, AV_LOG_VERBOSE,
2302  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2303  fps->frame_number, fps->last_frame->pts);
2304  }
2305  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2306  uint64_t nb_frames_dup;
// Refuse absurd duplication bursts (usually broken timestamps).
2307  if (*nb_frames > dts_error_threshold * 30) {
2308  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2309  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2310  *nb_frames = 0;
2311  return;
2312  }
2313  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2314  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2315  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2316  if (nb_frames_dup > fps->dup_warning) {
2317  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2318  fps->dup_warning *= 10;
2319  }
2320  }
2321 
2322  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2323  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2324 }
2325 
// Close one filtergraph output: if no frame was ever produced, send a dummy
// parameter-only frame so the encoder can still be initialized, then signal
// EOF downstream via the scheduler.
// NOTE(review): the signature line (presumably
// 'static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)')
// and the fgp declaration (line 2328) were dropped by extraction.
2327 {
2329  int ret;
2330 
2331  // we are finished and no frames were ever seen at this output,
2332  // at least initialize the encoder with a dummy frame
2333  if (!fgt->got_frame) {
2334  AVFrame *frame = fgt->frame;
2335  FrameData *fd;
2336 
// Populate only stream parameters — deliberately no data buffers.
2337  frame->time_base = ofp->tb_out;
2338  frame->format = ofp->format;
2339 
2340  frame->width = ofp->width;
2341  frame->height = ofp->height;
2342  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2343 
2344  frame->sample_rate = ofp->sample_rate;
2345  if (ofp->ch_layout.nb_channels) {
2346  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2347  if (ret < 0)
2348  return ret;
2349  }
2350 
2351  fd = frame_data(frame);
2352  if (!fd)
2353  return AVERROR(ENOMEM);
2354 
2355  fd->frame_rate_filter = ofp->fps.framerate;
2356 
2357  av_assert0(!frame->buf[0]);
2358 
2359  av_log(ofp, AV_LOG_WARNING,
2360  "No filtered frames for output stream, trying to "
2361  "initialize anyway.\n");
2362 
2363  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2364  if (ret < 0) {
2366  return ret;
2367  }
2368  }
2369 
2370  fgt->eof_out[ofp->index] = 1;
2371 
// NULL frame == EOF marker for the scheduler; EOF from the receiver side
// is not an error here.
2372  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2373  return (ret == AVERROR_EOF) ? 0 : ret;
2374 }
2375 
// Emit one filtered frame (or EOF when frame is NULL) on an output: apply
// video framerate conversion (possibly duplicating the previous or current
// frame), rescale audio timestamps, and hand the result to the scheduler.
// NOTE(review): the signature line
// 'static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,'
// and the fgp declaration (line 2379) were dropped by extraction.
2377  AVFrame *frame)
2378 {
2380  AVFrame *frame_prev = ofp->fps.last_frame;
2381  enum AVMediaType type = ofp->ofilter.type;
2382 
2383  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2384 
2385  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2386  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2387 
2388  for (int64_t i = 0; i < nb_frames; i++) {
2389  AVFrame *frame_out;
2390  int ret;
2391 
2392  if (type == AVMEDIA_TYPE_VIDEO) {
// For the first nb_frames_prev iterations, re-send the previous frame
// (duplication); afterwards send the current one.
2393  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2394  frame_prev : frame;
2395  if (!frame_in)
2396  break;
2397 
2398  frame_out = fgp->frame_enc;
2399  ret = av_frame_ref(frame_out, frame_in);
2400  if (ret < 0)
2401  return ret;
2402 
2403  frame_out->pts = ofp->next_pts;
2404 
// If a keyframe was dropped earlier, promote this frame to keyframe so
// seeking behavior is preserved.
2405  if (ofp->fps.dropped_keyframe) {
2406  frame_out->flags |= AV_FRAME_FLAG_KEY;
2407  ofp->fps.dropped_keyframe = 0;
2408  }
2409  } else {
2410  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2411  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2413 
2414  frame->time_base = ofp->tb_out;
2415  frame->duration = av_rescale_q(frame->nb_samples,
2416  (AVRational){ 1, frame->sample_rate },
2417  ofp->tb_out);
2418 
2419  ofp->next_pts = frame->pts + frame->duration;
2420 
2421  frame_out = frame;
2422  }
2423 
2424  // send the frame to consumers
2425  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2426  if (ret < 0) {
2427  av_frame_unref(frame_out);
2428 
// First failure on this output marks it done; EOF is a normal shutdown.
2429  if (!fgt->eof_out[ofp->index]) {
2430  fgt->eof_out[ofp->index] = 1;
2431  fgp->nb_outputs_done++;
2432  }
2433 
2434  return ret == AVERROR_EOF ? 0 : ret;
2435  }
2436 
2437  if (type == AVMEDIA_TYPE_VIDEO) {
2438  ofp->fps.frame_number++;
2439  ofp->next_pts++;
2440 
2441  if (i == nb_frames_prev && frame)
2442  frame->flags &= ~AV_FRAME_FLAG_KEY;
2443  }
2444 
2445  fgt->got_frame = 1;
2446  }
2447 
// Remember the current frame for possible duplication next round.
2448  if (frame && frame_prev) {
2449  av_frame_unref(frame_prev);
2450  av_frame_move_ref(frame_prev, frame);
2451  }
2452 
2453  if (!frame)
2454  return close_output(ofp, fgt);
2455 
2456  return 0;
2457 }
2458 
/* Pull one frame from this output's buffersink and forward it downstream
 * through fg_output_frame().  Returns 1 when no (more) frames are available,
 * 0 on success, <0 on error.
 * NOTE(review): this extraction is missing lines 2459 (signature start),
 * 2462 (presumably a FilterGraphPriv *fgp declaration) and 2467-2468 (the
 * buffersink receive call that sets ret) — confirm against the full source. */
2460  AVFrame *frame)
2461 {
2463  AVFilterContext *filter = ofp->filter;
2464  FrameData *fd;
2465  int ret;
2466 
 /* ret comes from the (missing) buffersink receive call above */
2469  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
 /* first EOF on this output: flush and close it */
2470  ret = fg_output_frame(ofp, fgt, NULL);
2471  return (ret < 0) ? ret : 1;
2472  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2473  return 1;
2474  } else if (ret < 0) {
2475  av_log(ofp, AV_LOG_WARNING,
2476  "Error in retrieving a frame from the filtergraph: %s\n",
2477  av_err2str(ret));
2478  return ret;
2479  }
2480 
 /* output already finished: discard (line 2482, presumably the
  * av_frame_unref() of the received frame, is missing here) */
2481  if (fgt->eof_out[ofp->index]) {
2483  return 0;
2484  }
2485 
2487 
2488  if (debug_ts)
2489  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2490  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2491  frame->time_base.num, frame->time_base.den);
2492 
2493  // Choose the output timebase the first time we get a frame.
2494  if (!ofp->tb_out_locked) {
2495  ret = choose_out_timebase(ofp, frame);
2496  if (ret < 0) {
2497  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2499  return ret;
2500  }
2501  }
2502 
2503  fd = frame_data(frame);
2504  if (!fd) {
2506  return AVERROR(ENOMEM);
2507  }
2508 
2510 
2511  // only use bits_per_raw_sample passed through from the decoder
2512  // if the filtergraph did not touch the frame data
2513  if (!fgp->is_meta)
2514  fd->bits_per_raw_sample = 0;
2515 
2516  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
 /* synthesize a duration from the frame rate; the declaration of
  * 'fr' (line 2518, presumably from av_buffersink_get_frame_rate)
  * is missing from this extraction */
2517  if (!frame->duration) {
2519  if (fr.num > 0 && fr.den > 0)
2520  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2521  }
2522 
2523  fd->frame_rate_filter = ofp->fps.framerate;
2524  }
2525 
2526  ret = fg_output_frame(ofp, fgt, frame);
2528  if (ret < 0)
2529  return ret;
2530 
2531  return 0;
2532 }
2533 
2534 /* retrieve all frames available at filtergraph outputs
2535  * and send them to consumers */
/* NOTE(review): this extraction is missing line 2536 (signature start: per
 * the callers, parameters are FilterGraph *fg, FilterGraphThread *fgt,
 * AVFrame *frame), line 2545 (per-input InputFilterPriv lookup), line 2562
 * (the request call that sets ret) and line 2583 (per-output
 * OutputFilterPriv lookup) — confirm against the full source.
 * Returns AVERROR_EOF once all outputs are done, 0 otherwise. */
2537  AVFrame *frame)
2538 {
2539  FilterGraphPriv *fgp = fgp_from_fg(fg);
2540  int did_step = 0;
2541 
2542  // graph not configured, just select the input to request
2543  if (!fgt->graph) {
2544  for (int i = 0; i < fg->nb_inputs; i++) {
2546  if (ifp->format < 0 && !fgt->eof_in[i]) {
2547  fgt->next_in = i;
2548  return 0;
2549  }
2550  }
2551 
2552  // This state - graph is not configured, but all inputs are either
2553  // initialized or EOF - should be unreachable because sending EOF to a
2554  // filter without even a fallback format should fail
2555  av_assert0(0);
2556  return AVERROR_BUG;
2557  }
2558 
2559  while (fgp->nb_outputs_done < fg->nb_outputs) {
2560  int ret;
2561 
 /* ret is set by the (missing) graph request call above this check */
2563  if (ret == AVERROR(EAGAIN)) {
2564  fgt->next_in = choose_input(fg, fgt);
2565  break;
2566  } else if (ret < 0) {
2567  if (ret == AVERROR_EOF)
2568  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2569  else
2570  av_log(fg, AV_LOG_ERROR,
2571  "Error requesting a frame from the filtergraph: %s\n",
2572  av_err2str(ret));
2573  return ret;
2574  }
2575  fgt->next_in = fg->nb_inputs;
2576 
2577  // return after one iteration, so that scheduler can rate-control us
2578  if (did_step && fgp->have_sources)
2579  return 0;
2580 
2581  /* Reap all buffers present in the buffer sinks */
2582  for (int i = 0; i < fg->nb_outputs; i++) {
2584 
 /* fg_output_step() returns 1 when the sink is drained */
2585  ret = 0;
2586  while (!ret) {
2587  ret = fg_output_step(ofp, fgt, frame);
2588  if (ret < 0)
2589  return ret;
2590  }
2591  }
2592  did_step = 1;
2593  }
2594 
2595  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2596 }
2597 
/* Advance the sub2video subtitle-overlay state to time pts, driven by a
 * heartbeat from another stream.
 * NOTE(review): line 2598 (signature) is missing from this extraction; the
 * call site in sub2video_frame() passes (InputFilter *ifilter, int64_t pts,
 * AVRational tb). */
2599 {
2600  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2601  int64_t pts2;
2602 
2603  /* subtitles seem to be usually muxed ahead of other streams;
2604  if not, subtracting a larger time here is necessary */
2605  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2606 
2607  /* do not send the heartbeat frame if the subtitle is already ahead */
2608  if (pts2 <= ifp->sub2video.last_pts)
2609  return;
2610 
2611  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2612  /* if we have hit the end of the current displayed subpicture,
2613  or if we need to initialize the system, update the
2614  overlayed subpicture and its start/end times */
2615  sub2video_update(ifp, pts2 + 1, NULL);
2616  else
2617  sub2video_push_ref(ifp, pts2);
2618 }
2619 
/* Feed one subtitle frame (rendered as video via sub2video) into the graph.
 * buffer != 0 means the graph is not configured yet: queue the frame for
 * later.  A frame with no data buffer is a heartbeat; a NULL frame is EOF. */
2620 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2621 {
2622  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2623  int ret;
2624 
2625  if (buffer) {
2626  AVFrame *tmp;
2627 
2628  if (!frame)
2629  return 0;
2630 
2631  tmp = av_frame_alloc();
2632  if (!tmp)
2633  return AVERROR(ENOMEM);
2634 
 /* NOTE(review): line 2635 is missing here — presumably the
  * av_frame_move_ref(tmp, frame) that fills tmp before queueing. */
2636 
2637  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2638  if (ret < 0) {
2639  av_frame_free(&tmp);
2640  return ret;
2641  }
2642 
2643  return 0;
2644  }
2645 
2646  // heartbeat frame
2647  if (frame && !frame->buf[0]) {
2648  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2649  return 0;
2650  }
2651 
 /* EOF: finish the currently displayed subpicture, then close the source */
2652  if (!frame) {
2653  if (ifp->sub2video.end_pts < INT64_MAX)
2654  sub2video_update(ifp, INT64_MAX, NULL);
2655 
2656  return av_buffersrc_add_frame(ifp->filter, NULL);
2657  }
2658 
2659  ifp->width = frame->width ? frame->width : ifp->width;
2660  ifp->height = frame->height ? frame->height : ifp->height;
2661 
 /* the AVSubtitle is carried inside the frame's first data buffer */
2662  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2663 
2664  return 0;
2665 }
2666 
/* Signal EOF on one filtergraph input.  If the input was never configured,
 * fall back to the decoder-provided parameters so the graph can still be
 * built.
 * NOTE(review): this extraction is missing lines 2680 (rounding-flags
 * argument of av_rescale_q_rnd), 2682 (the close/EOF call that sets ret),
 * 2692 (presumably sample_aspect_ratio fallback), 2697 (presumably the
 * av_channel_layout_copy call whose second argument follows) and 2712 (the
 * av_log call head) — confirm against the full source. */
2667 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2668  int64_t pts, AVRational tb)
2669 {
2670  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2671  int ret;
2672 
2673  if (fgt->eof_in[ifp->index])
2674  return 0;
2675 
2676  fgt->eof_in[ifp->index] = 1;
2677 
2678  if (ifp->filter) {
2679  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2681 
2683  if (ret < 0)
2684  return ret;
2685  } else {
2686  if (ifp->format < 0) {
2687  // the filtergraph was never configured, use the fallback parameters
2688  ifp->format = ifp->opts.fallback->format;
2689  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2690  ifp->width = ifp->opts.fallback->width;
2691  ifp->height = ifp->opts.fallback->height;
2693  ifp->color_space = ifp->opts.fallback->colorspace;
2694  ifp->color_range = ifp->opts.fallback->color_range;
2695  ifp->time_base = ifp->opts.fallback->time_base;
2696 
2698  &ifp->opts.fallback->ch_layout);
2699  if (ret < 0)
2700  return ret;
2701 
 /* all inputs now have parameters: the graph can be built */
2702  if (ifilter_has_all_input_formats(ifilter->graph)) {
2703  ret = configure_filtergraph(ifilter->graph, fgt);
2704  if (ret < 0) {
2705  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2706  return ret;
2707  }
2708  }
2709  }
2710 
2711  if (ifp->format < 0) {
2713  "Cannot determine format of input %s after EOF\n",
2714  ifp->opts.name);
2715  return AVERROR_INVALIDDATA;
2716  }
2717  }
2718 
2719  return 0;
2720 }
2721 
/* Bitmask flags recording which input parameters changed, i.e. why the
 * filtergraph needs to be reconfigured; combined in need_reinit below.
 * NOTE(review): the enum's opening line (2722) is missing from this
 * extraction. */
2723  VIDEO_CHANGED = (1 << 0),
2724  AUDIO_CHANGED = (1 << 1),
2725  MATRIX_CHANGED = (1 << 2),
2726  HWACCEL_CHANGED = (1 << 3)
2727 };
2728 
/* Map a NULL string to the literal "unknown", otherwise return it as-is.
 * Used when printing format/colorspace names that may be unavailable. */
static const char *unknown_if_null(const char *str)
{
    if (!str)
        return "unknown";
    return str;
}
2733 
/* Submit one decoded frame to a filtergraph input, reconfiguring the graph
 * first if the frame's parameters differ from what the input was set up for.
 * NOTE(review): this extraction is missing lines 2734 (signature start:
 * FilterGraph *fg, FilterGraphThread *fgt precede the visible parameters),
 * 2760 (the side-data lookup that sets sd), 2775/2788 (calls inside the
 * reinit/buffering paths), 2804 (presumably av_bprint_init of 'reason'),
 * 2840/2845 (statements after displaymatrix_applied / frame_data), and
 * 2847-2850 (the buffersrc submission call that sets ret) — confirm against
 * the full source. */
2735  InputFilter *ifilter, AVFrame *frame)
2736 {
2737  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2738  FrameData *fd;
2739  AVFrameSideData *sd;
2740  int need_reinit = 0, ret;
2741 
2742  /* determine if the parameters for this input changed */
2743  switch (ifp->type) {
2744  case AVMEDIA_TYPE_AUDIO:
2745  if (ifp->format != frame->format ||
2746  ifp->sample_rate != frame->sample_rate ||
2747  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2748  need_reinit |= AUDIO_CHANGED;
2749  break;
2750  case AVMEDIA_TYPE_VIDEO:
2751  if (ifp->format != frame->format ||
2752  ifp->width != frame->width ||
2753  ifp->height != frame->height ||
2754  ifp->color_space != frame->colorspace ||
2755  ifp->color_range != frame->color_range)
2756  need_reinit |= VIDEO_CHANGED;
2757  break;
2758  }
2759 
 /* sd is set by the (missing) display-matrix side-data lookup above */
2761  if (!ifp->displaymatrix_present ||
2762  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2763  need_reinit |= MATRIX_CHANGED;
2764  } else if (ifp->displaymatrix_present)
2765  need_reinit |= MATRIX_CHANGED;
2766 
 /* reinit is suppressed unless explicitly allowed for this input */
2767  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2768  need_reinit = 0;
2769 
2770  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2771  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2772  need_reinit |= HWACCEL_CHANGED;
2773 
2774  if (need_reinit) {
2776  if (ret < 0)
2777  return ret;
2778  }
2779 
2780  /* (re)init the graph if possible, otherwise buffer the frame and return */
2781  if (need_reinit || !fgt->graph) {
2782  AVFrame *tmp = av_frame_alloc();
2783 
2784  if (!tmp)
2785  return AVERROR(ENOMEM);
2786 
2787  if (!ifilter_has_all_input_formats(fg)) {
2789 
2790  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2791  if (ret < 0)
2792  av_frame_free(&tmp);
2793 
2794  return ret;
2795  }
2796 
 /* drain the old graph before tearing it down */
2797  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2798  av_frame_free(&tmp);
2799  if (ret < 0)
2800  return ret;
2801 
 /* log a human-readable reason for the reconfiguration */
2802  if (fgt->graph) {
2803  AVBPrint reason;
2805  if (need_reinit & AUDIO_CHANGED) {
2806  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2807  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2808  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2809  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2810  }
2811  if (need_reinit & VIDEO_CHANGED) {
2812  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2813  const char *color_space_name = av_color_space_name(frame->colorspace);
2814  const char *color_range_name = av_color_range_name(frame->color_range);
2815  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2816  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2817  unknown_if_null(color_space_name), frame->width, frame->height);
2818  }
2819  if (need_reinit & MATRIX_CHANGED)
2820  av_bprintf(&reason, "display matrix changed, ");
2821  if (need_reinit & HWACCEL_CHANGED)
2822  av_bprintf(&reason, "hwaccel changed, ");
2823  if (reason.len > 1)
2824  reason.str[reason.len - 2] = '\0'; // remove last comma
2825  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2826  }
2827 
2828  ret = configure_filtergraph(fg, fgt);
2829  if (ret < 0) {
2830  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2831  return ret;
2832  }
2833  }
2834 
 /* bring the frame's timestamps into this input's timebase */
2835  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2836  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2837  frame->time_base = ifp->time_base;
2838 
2839  if (ifp->displaymatrix_applied)
2841 
2842  fd = frame_data(frame);
2843  if (!fd)
2844  return AVERROR(ENOMEM);
2846 
 /* ret is set by the (missing) buffersrc submission call above */
2849  if (ret < 0) {
2851  if (ret != AVERROR_EOF)
2852  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2853  return ret;
2854  }
2855 
2856  return 0;
2857 }
2858 
/* Set a short descriptive name for the filtering thread: "?f<output>" for a
 * simple graph (the missing char argument selects the media-type letter),
 * "fc<index>" for a complex one.
 * NOTE(review): lines 2865 (the %c argument of snprintf) and 2871 (the call
 * that actually applies 'name' to the thread) are missing from this
 * extraction — confirm against the full source. */
2859 static void fg_thread_set_name(const FilterGraph *fg)
2860 {
2861  char name[16];
2862  if (filtergraph_is_simple(fg)) {
2863  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2864  snprintf(name, sizeof(name), "%cf%s",
2866  ofp->name);
2867  } else {
2868  snprintf(name, sizeof(name), "fc%d", fg->index);
2869  }
2870 
2872 }
2873 
/* Free all per-thread filtering state and zero the struct so it can be
 * reused.  Per the symbol index later in this file the signature is
 * static void fg_thread_uninit(FilterGraphThread *fgt) at line 2874, which
 * is missing from this extraction.
 * NOTE(review): line 2880 is also missing — presumably the call freeing
 * fgt->frame_queue_out itself after it has been drained. */
2875 {
2876  if (fgt->frame_queue_out) {
2877  AVFrame *frame;
 /* drain and free every queued frame before dropping the queue */
2878  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2879  av_frame_free(&frame);
2881  }
2882 
2883  av_frame_free(&fgt->frame);
2884  av_freep(&fgt->eof_in);
2885  av_freep(&fgt->eof_out);
2886 
2887  avfilter_graph_free(&fgt->graph);
2888 
2889  memset(fgt, 0, sizeof(*fgt));
2890 }
2891 
/* Allocate per-thread filtering state: a scratch frame plus per-input and
 * per-output EOF flag arrays.  On any failure everything allocated so far is
 * released via fg_thread_uninit() and AVERROR(ENOMEM) is returned.
 * NOTE(review): line 2908 is missing from this extraction — presumably the
 * allocation of fgt->frame_queue_out checked just below. */
2892 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2893 {
2894  memset(fgt, 0, sizeof(*fgt));
2895 
2896  fgt->frame = av_frame_alloc();
2897  if (!fgt->frame)
2898  goto fail;
2899 
2900  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2901  if (!fgt->eof_in)
2902  goto fail;
2903 
2904  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2905  if (!fgt->eof_out)
2906  goto fail;
2907 
2909  if (!fgt->frame_queue_out)
2910  goto fail;
2911 
2912  return 0;
2913 
2914 fail:
2915  fg_thread_uninit(fgt);
2916  return AVERROR(ENOMEM);
2917 }
2918 
/* Main loop of a filtergraph worker thread: receive frames/EOF/commands from
 * the scheduler, feed them into the graph, and push all produced frames to
 * the consumers.  Runs until the scheduler signals EOF or an error occurs.
 * NOTE(review): this extraction is missing lines 2934 (the condition opening
 * the block closed at 2941 — per the comment above it, presumably
 * "if all input parameters are known"), 2989 (a statement in the EOF branch)
 * and 3016 (the per-output OutputFilterPriv lookup) — confirm against the
 * full source. */
2919 static int filter_thread(void *arg)
2920 {
2921  FilterGraphPriv *fgp = arg;
2922  FilterGraph *fg = &fgp->fg;
2923 
2924  FilterGraphThread fgt;
2925  int ret = 0, input_status = 0;
2926 
2927  ret = fg_thread_init(&fgt, fg);
2928  if (ret < 0)
2929  goto finish;
2930 
2931  fg_thread_set_name(fg);
2932 
2933  // if we have all input parameters the graph can now be configured
2935  ret = configure_filtergraph(fg, &fgt);
2936  if (ret < 0) {
2937  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2938  av_err2str(ret));
2939  goto finish;
2940  }
2941  }
2942 
2943  while (1) {
2944  InputFilter *ifilter;
2945  InputFilterPriv *ifp;
2946  enum FrameOpaque o;
2947  unsigned input_idx = fgt.next_in;
2948 
2949  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2950  &input_idx, fgt.frame);
2951  if (input_status == AVERROR_EOF) {
2952  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2953  break;
2954  } else if (input_status == AVERROR(EAGAIN)) {
2955  // should only happen when we didn't request any input
2956  av_assert0(input_idx == fg->nb_inputs);
2957  goto read_frames;
2958  }
2959  av_assert0(input_status >= 0);
2960 
 /* NOTE(review): this assignment appears twice (lines 2961 and 2963);
  * the second one looks redundant — verify against the full source. */
2961  o = (intptr_t)fgt.frame->opaque;
2962 
2963  o = (intptr_t)fgt.frame->opaque;
2964 
2965  // message on the control stream
2966  if (input_idx == fg->nb_inputs) {
2967  FilterCommand *fc;
2968 
2969  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2970 
2971  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2972  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2973  fc->all_filters);
2974  av_frame_unref(fgt.frame);
2975  continue;
2976  }
2977 
2978  // we received an input frame or EOF
2979  ifilter = fg->inputs[input_idx];
2980  ifp = ifp_from_ifilter(ifilter);
2981 
2982  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2983  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2984  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2985  !fgt.graph);
2986  } else if (fgt.frame->buf[0]) {
2987  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2988  } else {
2990  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2991  }
2992  av_frame_unref(fgt.frame);
2993  if (ret == AVERROR_EOF) {
2994  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2995  input_idx);
2996  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2997  continue;
2998  }
2999  if (ret < 0)
3000  goto finish;
3001 
3002 read_frames:
3003  // retrieve all newly available frames
3004  ret = read_frames(fg, &fgt, fgt.frame);
3005  if (ret == AVERROR_EOF) {
3006  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3007  break;
3008  } else if (ret < 0) {
3009  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3010  av_err2str(ret));
3011  goto finish;
3012  }
3013  }
3014 
 /* flush every output that has not yet seen EOF */
3015  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3017 
3018  if (fgt.eof_out[i] || !fgt.graph)
3019  continue;
3020 
3021  ret = fg_output_frame(ofp, &fgt, NULL);
3022  if (ret < 0)
3023  goto finish;
3024  }
3025 
3026 finish:
3027  // EOF is normal termination
3028  if (ret == AVERROR_EOF)
3029  ret = 0;
3030 
3031  fg_thread_uninit(&fgt);
3032 
3033  return ret;
3034 }
3035 
3036 void fg_send_command(FilterGraph *fg, double time, const char *target,
3037  const char *command, const char *arg, int all_filters)
3038 {
3039  FilterGraphPriv *fgp = fgp_from_fg(fg);
3040  AVBufferRef *buf;
3041  FilterCommand *fc;
3042 
3043  fc = av_mallocz(sizeof(*fc));
3044  if (!fc)
3045  return;
3046 
3047  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3048  if (!buf) {
3049  av_freep(&fc);
3050  return;
3051  }
3052 
3053  fc->target = av_strdup(target);
3054  fc->command = av_strdup(command);
3055  fc->arg = av_strdup(arg);
3056  if (!fc->target || !fc->command || !fc->arg) {
3057  av_buffer_unref(&buf);
3058  return;
3059  }
3060 
3061  fc->time = time;
3062  fc->all_filters = all_filters;
3063 
3064  fgp->frame->buf[0] = buf;
3065  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3066 
3067  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3068 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:126
AVSubtitle
Definition: avcodec.h:2232
formats
formats
Definition: signature.h:47
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1802
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:640
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:653
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:210
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:625
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:331
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:121
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2392
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:329
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:198
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2237
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2061
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1475
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:786
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:138
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:976
FrameData
Definition: ffmpeg.h:649
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2036
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:153
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:262
OutputFilter::apad
char * apad
Definition: ffmpeg.h:339
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:290
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:949
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:111
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:351
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:260
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1995
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:629
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2726
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:61
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:140
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:247
AVSubtitleRect
Definition: avcodec.h:2205
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2236
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:979
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:173
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:522
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:664
InputFile::index
int index
Definition: ffmpeg.h:453
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
Definition: avfilter.h:914
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
AVFrame::width
int width
Definition: frame.h:446
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:55
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2376
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:173
FilterGraph::index
int index
Definition: ffmpeg.h:349
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:137
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:177
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:232
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1812
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:352
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2723
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:646
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:239
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:316
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:240
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:587
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2459
FilterGraphPriv
Definition: ffmpeg_filter.c:44
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:594
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1847
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:194
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:408
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:75
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:69
OutputFilterOptions
Definition: ffmpeg.h:280
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:247
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:268
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
finish
static void finish(void)
Definition: movenc.c:373
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3341
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:188
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2874
fail
#define fail()
Definition: checkasm.h:188
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:196
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:317
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:264
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:599
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:191
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:775
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:120
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1647
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:748
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1828
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:141
FrameData::tb
AVRational tb
Definition: ffmpeg.h:659
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:217
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:203
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:178
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:336
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:248
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:799
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2598
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:1334
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:229
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:662
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2667
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1237
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:190
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:951
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1090
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:253
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:114
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:353
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:623
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1465
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:116
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:337
InputFilter
Definition: ffmpeg.h:323
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:58
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:876
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:985
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:277
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2238
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2892
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:250
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:531
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:259
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1181
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:324
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:234
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1035
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1186
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:223
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:202
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:261
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3281
AVFormatContext
Format I/O context.
Definition: avformat.h:1260
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:633
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
FilterGraphPriv::nb_threads
char * nb_threads
Definition: ffmpeg_filter.c:62
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:332
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1294
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:228
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:134
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:815
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:743
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:204
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1022
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:175
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:416
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts)
Definition: ffmpeg_dec.c:1378
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:146
Decoder
Definition: ffmpeg.h:394
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1033
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:276
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1091
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:762
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:118
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:894
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:476
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:215
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:637
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2326
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:662
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1544
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:666
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1424
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1114
AVFilterGraph
Definition: avfilter.h:760
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:246
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:133
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:652
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:354
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:222
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:800
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:424
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:269
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:189
FilterGraph
Definition: ffmpeg.h:347
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1103
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1493
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:264
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:911
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:277
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:765
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2030
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1957
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:241
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1626
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2919
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:144
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:386
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:148
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:311
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:226
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:104
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:908
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:213
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:173
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2162
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:135
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2725
FilterCommand::time
double time
Definition: ffmpeg_filter.c:249
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1392
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:157
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:147
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:555
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1342
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:501
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:428
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:56
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:453
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2235
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:60
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1465
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:180
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1015
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:132
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1821
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1365
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2724
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:117
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2327
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2729
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:265
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:193
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:341
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2536
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:804
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2188
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2734
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:948
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:834
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:206
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:250
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:185
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:169
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:117
FPSConvContext
Definition: ffmpeg_filter.c:166
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:109
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:664
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3036
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:270
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:192
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1761
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:183
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
InputFilterPriv::sub2video
struct InputFilterPriv::@6 sub2video
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:455
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:635
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:608
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:126
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:307
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:205
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:783
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:897
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2437
AVFilter
Filter definition.
Definition: avfilter.h:201
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2204
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:161
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1049
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:325
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:778
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:167
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:469
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1443
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1074
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:233
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2722
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:481
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:981
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:218
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:446
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:754
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:243
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:373
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:611
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:127
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:412
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1089
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:142
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:124
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:437
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:242
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:168
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:914
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2085
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:236
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:224
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:286
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1115
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:348
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:444
OutputFilter
Definition: ffmpeg.h:328
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2620
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:312
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:225
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:344
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:78
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:414
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2371
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:245
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1042
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:274
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:397
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:130
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:154
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:332
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:201
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1312
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:56
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:230
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec)
Definition: ffmpeg_filter.c:715
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1347
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:763
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:460
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:651
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:459
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:210
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:903
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:202
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:123
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:182
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2859
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:157
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1636
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2234
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:246
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:244
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:132
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:343
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:76
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:257
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:184