FFmpeg
f_graphmonitor.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "float.h"
22 
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/eval.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/timestamp.h"
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "internal.h"
33 #include "video.h"
34 
35 typedef struct GraphMonitorContext {
36  const AVClass *class;
37 
38  int w, h;
39  float opacity;
40  int mode;
41  int flags;
43 
44  int64_t pts;
45  int64_t next_pts;
50  uint8_t bg[4];
52 
/* Items that can be rendered for every filter link; combined in the
 * "flags" option and tested against GraphMonitorContext.flags. */
enum {
    MODE_QUEUE = 1 << 0, /* number of frames queued on the link */
    MODE_FCIN  = 1 << 1, /* link frame_count_in counter */
    MODE_FCOUT = 1 << 2, /* link frame_count_out counter */
    MODE_PTS   = 1 << 3, /* current pts */
    MODE_TIME  = 1 << 4, /* current pts as wall-clock time */
    MODE_TB    = 1 << 5, /* link time base */
    MODE_FMT   = 1 << 6, /* pixel/sample format name */
    MODE_SIZE  = 1 << 7, /* video size or audio channel count */
    MODE_RATE  = 1 << 8, /* frame rate or sample rate */
};
64 
65 #define OFFSET(x) offsetof(GraphMonitorContext, x)
66 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
67 
68 static const AVOption graphmonitor_options[] = {
69  { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
70  { "s", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
71  { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
72  { "o", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
73  { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
74  { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
75  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
76  { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
77  { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
78  { "f", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
79  { "queue", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
80  { "frame_count_in", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
81  { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN}, 0, 0, VF, "flags" },
82  { "pts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS}, 0, 0, VF, "flags" },
83  { "time", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" },
84  { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB}, 0, 0, VF, "flags" },
85  { "format", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT}, 0, 0, VF, "flags" },
86  { "size", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" },
87  { "rate", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" },
88  { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
89  { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
90  { NULL }
91 };
92 
94 {
95  AVFilterLink *outlink = ctx->outputs[0];
96  static const enum AVPixelFormat pix_fmts[] = {
99  };
100  int ret;
101 
103  if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
104  return ret;
105 
106  return 0;
107 }
108 
110 {
111  int bg = AV_RN32(s->bg);
112 
113  for (int i = 0; i < out->height; i++)
114  for (int j = 0; j < out->width; j++)
115  AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
116 }
117 
118 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
119 {
120  const uint8_t *font;
121  int font_height;
122  int i;
123 
124  font = avpriv_cga_font, font_height = 8;
125 
126  if (y + 8 >= pic->height ||
127  x + strlen(txt) * 8 >= pic->width)
128  return;
129 
130  for (i = 0; txt[i]; i++) {
131  int char_y, mask;
132 
133  uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
134  for (char_y = 0; char_y < font_height; char_y++) {
135  for (mask = 0x80; mask; mask >>= 1) {
136  if (font[txt[i] * font_height + char_y] & mask) {
137  p[0] = color[0];
138  p[1] = color[1];
139  p[2] = color[2];
140  }
141  p += 4;
142  }
143  p += pic->linesize[0] - 8 * 4;
144  }
145  }
146 }
147 
149 {
150  for (int j = 0; j < filter->nb_inputs; j++) {
151  AVFilterLink *l = filter->inputs[j];
152  size_t frames = ff_inlink_queued_frames(l);
153 
154  if (frames)
155  return 1;
156  }
157 
158  for (int j = 0; j < filter->nb_outputs; j++) {
159  AVFilterLink *l = filter->outputs[j];
160  size_t frames = ff_inlink_queued_frames(l);
161 
162  if (frames)
163  return 1;
164  }
165 
166  return 0;
167 }
168 
170  int xpos, int ypos,
171  AVFilterLink *l,
172  size_t frames)
173 {
174  GraphMonitorContext *s = ctx->priv;
175  char buffer[1024] = { 0 };
176 
177  if (s->flags & MODE_FMT) {
178  if (l->type == AVMEDIA_TYPE_VIDEO) {
179  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
181  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
182  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
184  }
185  drawtext(out, xpos, ypos, buffer, s->white);
186  xpos += strlen(buffer) * 8;
187  }
188  if (s->flags & MODE_SIZE) {
189  if (l->type == AVMEDIA_TYPE_VIDEO) {
190  snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
191  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
192  snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
193  }
194  drawtext(out, xpos, ypos, buffer, s->white);
195  xpos += strlen(buffer) * 8;
196  }
197  if (s->flags & MODE_RATE) {
198  if (l->type == AVMEDIA_TYPE_VIDEO) {
199  snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
200  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
201  snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
202  }
203  drawtext(out, xpos, ypos, buffer, s->white);
204  xpos += strlen(buffer) * 8;
205  }
206  if (s->flags & MODE_TB) {
207  snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
208  drawtext(out, xpos, ypos, buffer, s->white);
209  xpos += strlen(buffer) * 8;
210  }
211  if (s->flags & MODE_QUEUE) {
212  snprintf(buffer, sizeof(buffer)-1, " | queue: ");
213  drawtext(out, xpos, ypos, buffer, s->white);
214  xpos += strlen(buffer) * 8;
215  snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
216  drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
217  xpos += strlen(buffer) * 8;
218  }
219  if (s->flags & MODE_FCIN) {
220  snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
221  drawtext(out, xpos, ypos, buffer, s->white);
222  xpos += strlen(buffer) * 8;
223  }
224  if (s->flags & MODE_FCOUT) {
225  snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
226  drawtext(out, xpos, ypos, buffer, s->white);
227  xpos += strlen(buffer) * 8;
228  }
229  if (s->flags & MODE_PTS) {
230  snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
231  drawtext(out, xpos, ypos, buffer, s->white);
232  xpos += strlen(buffer) * 8;
233  }
234  if (s->flags & MODE_TIME) {
235  snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
236  drawtext(out, xpos, ypos, buffer, s->white);
237  xpos += strlen(buffer) * 8;
238  }
239 }
240 
241 static int create_frame(AVFilterContext *ctx, int64_t pts)
242 {
243  GraphMonitorContext *s = ctx->priv;
244  AVFilterLink *outlink = ctx->outputs[0];
245  AVFrame *out;
246  int xpos, ypos = 0;
247 
248  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
249  if (!out)
250  return AVERROR(ENOMEM);
251 
252  clear_image(s, out, outlink);
253 
254  for (int i = 0; i < ctx->graph->nb_filters; i++) {
255  AVFilterContext *filter = ctx->graph->filters[i];
256  char buffer[1024] = { 0 };
257 
258  if (s->mode && !filter_have_queued(filter))
259  continue;
260 
261  xpos = 0;
262  drawtext(out, xpos, ypos, filter->name, s->white);
263  xpos += strlen(filter->name) * 8 + 10;
264  drawtext(out, xpos, ypos, filter->filter->name, s->white);
265  ypos += 10;
266  for (int j = 0; j < filter->nb_inputs; j++) {
267  AVFilterLink *l = filter->inputs[j];
268  size_t frames = ff_inlink_queued_frames(l);
269 
270  if (s->mode && !frames)
271  continue;
272 
273  xpos = 10;
274  snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
275  drawtext(out, xpos, ypos, buffer, s->white);
276  xpos += strlen(buffer) * 8;
277  drawtext(out, xpos, ypos, l->src->name, s->white);
278  xpos += strlen(l->src->name) * 8 + 10;
279  draw_items(ctx, out, xpos, ypos, l, frames);
280  ypos += 10;
281  }
282 
283  ypos += 2;
284  for (int j = 0; j < filter->nb_outputs; j++) {
285  AVFilterLink *l = filter->outputs[j];
286  size_t frames = ff_inlink_queued_frames(l);
287 
288  if (s->mode && !frames)
289  continue;
290 
291  xpos = 10;
292  snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
293  drawtext(out, xpos, ypos, buffer, s->white);
294  xpos += strlen(buffer) * 8;
295  drawtext(out, xpos, ypos, l->dst->name, s->white);
296  xpos += strlen(l->dst->name) * 8 + 10;
297  draw_items(ctx, out, xpos, ypos, l, frames);
298  ypos += 10;
299  }
300  ypos += 5;
301  }
302 
303  out->pts = pts;
304  s->pts = pts + 1;
305  return ff_filter_frame(outlink, out);
306 }
307 
309 {
310  GraphMonitorContext *s = ctx->priv;
311  AVFilterLink *inlink = ctx->inputs[0];
312  AVFilterLink *outlink = ctx->outputs[0];
313  int64_t pts = AV_NOPTS_VALUE;
314 
316 
318  AVFrame *frame = NULL;
319  int ret;
320 
322  if (ret < 0)
323  return ret;
324  if (ret > 0) {
325  pts = frame->pts;
327  }
328  }
329 
330  if (pts != AV_NOPTS_VALUE) {
331  pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
332  if (s->pts == AV_NOPTS_VALUE)
333  s->pts = pts;
334  s->next_pts = pts;
335  }
336 
337  if (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink))
338  return create_frame(ctx, s->pts);
339 
342 
343  return FFERROR_NOT_READY;
344 }
345 
346 static int config_output(AVFilterLink *outlink)
347 {
348  GraphMonitorContext *s = outlink->src->priv;
349 
350  s->bg[3] = 255 * s->opacity;
351  s->white[0] = s->white[1] = s->white[2] = 255;
352  s->yellow[0] = s->yellow[1] = 255;
353  s->red[0] = 255;
354  s->green[1] = 255;
355  s->pts = AV_NOPTS_VALUE;
356  s->next_pts = AV_NOPTS_VALUE;
357  outlink->w = s->w;
358  outlink->h = s->h;
359  outlink->sample_aspect_ratio = (AVRational){1,1};
360  outlink->frame_rate = s->frame_rate;
361  outlink->time_base = av_inv_q(s->frame_rate);
362 
363  return 0;
364 }
365 
366 #if CONFIG_GRAPHMONITOR_FILTER
367 
368 AVFILTER_DEFINE_CLASS(graphmonitor);
369 
370 static const AVFilterPad graphmonitor_inputs[] = {
371  {
372  .name = "default",
373  .type = AVMEDIA_TYPE_VIDEO,
374  },
375  { NULL }
376 };
377 
378 static const AVFilterPad graphmonitor_outputs[] = {
379  {
380  .name = "default",
381  .type = AVMEDIA_TYPE_VIDEO,
382  .config_props = config_output,
383  },
384  { NULL }
385 };
386 
388  .name = "graphmonitor",
389  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
390  .priv_size = sizeof(GraphMonitorContext),
391  .priv_class = &graphmonitor_class,
393  .activate = activate,
394  .inputs = graphmonitor_inputs,
395  .outputs = graphmonitor_outputs,
396 };
397 
398 #endif // CONFIG_GRAPHMONITOR_FILTER
399 
400 #if CONFIG_AGRAPHMONITOR_FILTER
401 
402 #define agraphmonitor_options graphmonitor_options
403 AVFILTER_DEFINE_CLASS(agraphmonitor);
404 
405 static const AVFilterPad agraphmonitor_inputs[] = {
406  {
407  .name = "default",
408  .type = AVMEDIA_TYPE_AUDIO,
409  },
410  { NULL }
411 };
412 
413 static const AVFilterPad agraphmonitor_outputs[] = {
414  {
415  .name = "default",
416  .type = AVMEDIA_TYPE_VIDEO,
417  .config_props = config_output,
418  },
419  { NULL }
420 };
421 
423  .name = "agraphmonitor",
424  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
425  .priv_size = sizeof(GraphMonitorContext),
426  .priv_class = &agraphmonitor_class,
428  .activate = activate,
429  .inputs = agraphmonitor_inputs,
430  .outputs = agraphmonitor_outputs,
431 };
432 #endif // CONFIG_AGRAPHMONITOR_FILTER
GraphMonitorContext::mode
int mode
Definition: f_graphmonitor.c:40
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
MODE_TB
@ MODE_TB
Definition: f_graphmonitor.c:59
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:300
MODE_SIZE
@ MODE_SIZE
Definition: f_graphmonitor.c:61
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:582
MODE_RATE
@ MODE_RATE
Definition: f_graphmonitor.c:62
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:236
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
GraphMonitorContext::opacity
float opacity
Definition: f_graphmonitor.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
pixdesc.h
AVFrame::width
int width
Definition: frame.h:358
w
uint8_t w
Definition: llviddspenc.c:38
OFFSET
#define OFFSET(x)
Definition: f_graphmonitor.c:65
AVOption
AVOption.
Definition: opt.h:246
float.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
video.h
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
create_frame
static int create_frame(AVFilterContext *ctx, int64_t pts)
Definition: f_graphmonitor.c:241
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
ff_avf_agraphmonitor
AVFilter ff_avf_agraphmonitor
formats.h
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1476
GraphMonitorContext::bg
uint8_t bg[4]
Definition: f_graphmonitor.c:50
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:353
frames
if it could not because there are no more frames
Definition: filter_design.txt:266
pts
static int64_t pts
Definition: transcode_aac.c:647
MODE_FCOUT
@ MODE_FCOUT
Definition: f_graphmonitor.c:56
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: f_graphmonitor.c:93
AVRational::num
int num
Numerator.
Definition: rational.h:59
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
GraphMonitorContext::green
uint8_t green[4]
Definition: f_graphmonitor.c:49
MODE_FCIN
@ MODE_FCIN
Definition: f_graphmonitor.c:55
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:484
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
GraphMonitorContext::white
uint8_t white[4]
Definition: f_graphmonitor.c:46
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
MODE_TIME
@ MODE_TIME
Definition: f_graphmonitor.c:58
GraphMonitorContext::pts
int64_t pts
Definition: f_graphmonitor.c:44
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
GraphMonitorContext::red
uint8_t red[4]
Definition: f_graphmonitor.c:48
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:233
AV_RN32
#define AV_RN32(p)
Definition: intreadwrite.h:364
AVFilterContext::name
char * name
name of this filter instance
Definition: avfilter.h:343
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
VF
#define VF
Definition: f_graphmonitor.c:66
ff_inlink_queued_frames
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
Definition: avfilter.c:1446
activate
static int activate(AVFilterContext *ctx)
Definition: f_graphmonitor.c:308
eval.h
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
GraphMonitorContext::next_pts
int64_t next_pts
Definition: f_graphmonitor.c:45
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_vf_graphmonitor
AVFilter ff_vf_graphmonitor
GraphMonitorContext
Definition: f_graphmonitor.c:35
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
xga_font_data.h
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
Definition: f_graphmonitor.c:118
internal.h
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:314
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
uint8_t
uint8_t
Definition: audio_convert.c:194
graphmonitor_options
static const AVOption graphmonitor_options[]
Definition: f_graphmonitor.c:68
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
GraphMonitorContext::frame_rate
AVRational frame_rate
Definition: f_graphmonitor.c:42
AVFilter
Filter definition.
Definition: avfilter.h:144
MODE_QUEUE
@ MODE_QUEUE
Definition: f_graphmonitor.c:54
ret
ret
Definition: filter_design.txt:187
filter_have_queued
static int filter_have_queued(AVFilterContext *filter)
Definition: f_graphmonitor.c:148
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
GraphMonitorContext::w
int w
Definition: f_graphmonitor.c:38
clear_image
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
Definition: f_graphmonitor.c:109
config_output
static int config_output(AVFilterLink *outlink)
Definition: f_graphmonitor.c:346
SIZE_SPECIFIER
#define SIZE_SPECIFIER
Definition: internal.h:264
AVFrame::height
int height
Definition: frame.h:358
GraphMonitorContext::h
int h
Definition: f_graphmonitor.c:38
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
MODE_PTS
@ MODE_PTS
Definition: f_graphmonitor.c:57
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
MODE_FMT
@ MODE_FMT
Definition: f_graphmonitor.c:60
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
GraphMonitorContext::flags
int flags
Definition: f_graphmonitor.c:41
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
AV_OPT_TYPE_FLAGS
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:222
timestamp.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
GraphMonitorContext::yellow
uint8_t yellow[4]
Definition: f_graphmonitor.c:47
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
draw_items
static void draw_items(AVFilterContext *ctx, AVFrame *out, int xpos, int ypos, AVFilterLink *l, size_t frames)
Definition: f_graphmonitor.c:169
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465