f_graphmonitor.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/xga_font_data.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

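/*
 * graphmonitor (video input) and agraphmonitor (audio input) render
 * per-link filtergraph statistics -- queued frames, frame counts,
 * timestamps, formats, sizes and rates -- onto an RGBA video stream,
 * drawn with the built-in 8x8 CGA bitmap font.
 *
 * Hedged usage sketch (not part of the original file; option names come
 * from graphmonitor_options below, the file names are placeholders):
 *
 *   ffmpeg -i input.mkv -filter_complex \
 *     "[0:v]graphmonitor=flags=queue+pts+time:m=compact[out]" \
 *     -map "[out]" monitor.mkv
 */
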
typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;
    float opacity;
    int mode;
    int flags;
    AVRational frame_rate;

    int64_t pts;
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t bg[4];
} GraphMonitorContext;

enum {
    MODE_QUEUE = 1 << 0,
    MODE_FCIN  = 1 << 1,
    MODE_FCOUT = 1 << 2,
    MODE_PTS   = 1 << 3,
    MODE_TIME  = 1 << 4,
    MODE_TB    = 1 << 5,
    MODE_FMT   = 1 << 6,
    MODE_SIZE  = 1 << 7,
    MODE_RATE  = 1 << 8,
};

#define OFFSET(x) offsetof(GraphMonitorContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption graphmonitor_options[] = {
    { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "s",    "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
    { "o",       "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "m",    "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "full",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
    { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
    { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "f",     "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "queue",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
    { "frame_count_in",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN},  0, 0, VF, "flags" },
    { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
    { "pts",             NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS},   0, 0, VF, "flags" },
    { "time",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME},  0, 0, VF, "flags" },
    { "timebase",        NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB},    0, 0, VF, "flags" },
    { "format",          NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT},   0, 0, VF, "flags" },
    { "size",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE},  0, 0, VF, "flags" },
    { "rate",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE},  0, 0, VF, "flags" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { NULL }
};

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    int bg = AV_RN32(s->bg);

    for (int i = 0; i < out->height; i++)
        for (int j = 0; j < out->width; j++)
            AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
}

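/*
 * Draw a text string into an RGBA frame with the 8x8 CGA ROM font
 * (avpriv_cga_font, 8 bytes per glyph): for every set bit in a glyph row
 * the R, G and B bytes are overwritten with the requested color while the
 * alpha byte is left untouched. Strings that would overflow the right or
 * bottom edge of the frame are skipped entirely.
 */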
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    if (y + 8 >= pic->height ||
        x + strlen(txt) * 8 >= pic->width)
        return;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask) {
                    p[0] = color[0];
                    p[1] = color[1];
                    p[2] = color[2];
                }
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}

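/* Return 1 if any input or output link of the given filter instance has
 * frames sitting in its FIFO; compact mode uses this to skip idle filters. */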
static int filter_have_queued(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    return 0;
}

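/*
 * Append the per-link items selected via the "flags" option after the
 * current x position. The queue length is color coded: white for 0,
 * green for 1-9, yellow for 10-49, red for 50 or more queued frames.
 */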
static void draw_items(AVFilterContext *ctx, AVFrame *out,
                       int xpos, int ypos,
                       AVFilterLink *l,
                       size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    char buffer[1024] = { 0 };

    if (s->flags & MODE_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TB) {
        snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_QUEUE) {
        snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
        snprintf(buffer, sizeof(buffer)-1, "%"PRId64, (int64_t)frames);
        drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCIN) {
        snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCOUT) {
        snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_PTS) {
        snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TIME) {
        snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
}

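/*
 * Build one monitor frame: clear the output to the background color, then
 * draw a block per filter in the graph (instance name, filter name, one
 * line per input and output link). In compact mode only filters and links
 * with queued frames are drawn.
 */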
static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int xpos, ypos = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    clear_image(s, out, outlink);

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];
        char buffer[1024] = { 0 };

        if (s->mode && !filter_have_queued(filter))
            continue;

        xpos = 0;
        drawtext(out, xpos, ypos, filter->name, s->white);
        xpos += strlen(filter->name) * 8 + 10;
        drawtext(out, xpos, ypos, filter->filter->name, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->src->name, s->white);
            xpos += strlen(l->src->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }

        ypos += 2;
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->dst->name, s->white);
            xpos += strlen(l->dst->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }
        ypos += 5;
    }

    out->pts = pts;
    s->pts = pts;
    return ff_filter_frame(outlink, out);
}

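/*
 * activate() drives the filter: status is forwarded in both directions,
 * at most one queued input frame is consumed (and freed, only its pts is
 * kept), and a new monitor frame is produced whenever the rescaled pts
 * advances past the last emitted one and the output wants a frame.
 */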
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        if (s->pts < pts && ff_outlink_frame_wanted(outlink))
            return create_frame(ctx, pts);
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

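/*
 * Fill the drawing colors (the background alpha follows the "opacity"
 * option) and propagate size, frame rate and time base to the output link.
 */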
static int config_output(AVFilterLink *outlink)
{
    GraphMonitorContext *s = outlink->src->priv;

    s->bg[3] = 255 * s->opacity;
    s->white[0] = s->white[1] = s->white[2] = 255;
    s->yellow[0] = s->yellow[1] = 255;
    s->red[0] = 255;
    s->green[1] = 255;
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(s->frame_rate);

    return 0;
}

#if CONFIG_GRAPHMONITOR_FILTER

AVFILTER_DEFINE_CLASS(graphmonitor);

static const AVFilterPad graphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_graphmonitor = {
    .name          = "graphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &graphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = graphmonitor_inputs,
    .outputs       = graphmonitor_outputs,
};

#endif // CONFIG_GRAPHMONITOR_FILTER

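/*
 * agraphmonitor is the same filter with an audio input pad, so graphs
 * without any video stream can still be monitored; it shares the option
 * table above. A hedged example (file names are placeholders):
 *
 *   ffmpeg -i input.wav -filter_complex "[0:a]agraphmonitor[out]" \
 *     -map "[out]" monitor.mkv
 */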
#if CONFIG_AGRAPHMONITOR_FILTER

#define agraphmonitor_options graphmonitor_options
AVFILTER_DEFINE_CLASS(agraphmonitor);

static const AVFilterPad agraphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad agraphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_agraphmonitor = {
    .name          = "agraphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &agraphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = agraphmonitor_inputs,
    .outputs       = agraphmonitor_outputs,
};
#endif // CONFIG_AGRAPHMONITOR_FILTER