FFmpeg: setpts.c

/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */
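
/*
 * Illustrative usage (note added to this listing, not part of the original
 * source): typical expressions passed to the setpts/asetpts filters, as
 * described in the FFmpeg filter documentation.
 *
 *   setpts=PTS-STARTPTS     shift video timestamps so the stream starts at 0
 *   setpts=0.5*PTS          halve the timestamps (play twice as fast)
 *   setpts='N/(25*TB)'      rebuild timestamps assuming a constant 25 fps
 *   asetpts=N/SR/TB         rebuild audio timestamps from the sample count
 */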

#include "config_components.h"

#include <inttypes.h>

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",  ///< defined only for constant frame-rate video
    "INTERLACED",  ///< tell if the current frame is interlaced
    "N",           ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",  ///< number of samples in the current frame (only audio)
#if FF_API_FRAME_PKT
    "POS",         ///< original position in the file of the frame
#endif
    "PREV_INPTS",  ///< previous  input PTS
    "PREV_INT",    ///< previous  input time in seconds
    "PREV_OUTPTS", ///< previous output PTS
    "PREV_OUTT",   ///< previous output time in seconds
    "PTS",         ///< original PTS in the file of the frame
    "SAMPLE_RATE", ///< sample rate (only audio)
    "STARTPTS",    ///< PTS at start of movie
    "STARTT",      ///< time at start of movie
    "T",           ///< original time in the file of the frame
    "TB",          ///< timebase
    "RTCTIME",     ///< wallclock (RTC) time in microseconds
    "RTCSTART",    ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",           //   number of samples in the current frame
    "SR",          //   audio sample rate
    "FR",          ///< defined only for constant frame-rate video
    "T_CHANGE",    ///< time of first frame after latest command was applied
    NULL
};

enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
#if FF_API_FRAME_PKT
    VAR_POS,
#endif
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_FR,
    VAR_T_CHANGE,
    VAR_VARS_NB
};

typedef struct SetPTSContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

#define V(name_) \
    setpts->var_values[VAR_##name_]
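/* Illustrative note (added to this listing, not in the original source):
 * V(PTS), for example, expands to setpts->var_values[VAR_PTS]; the enum above
 * must stay in the same order as var_names[] so each expression variable maps
 * to the right slot. */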

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    V(N)           = 0.0;
    V(S)           = 0.0;
    V(PREV_INPTS)  = NAN;
    V(PREV_INT)    = NAN;
    V(PREV_OUTPTS) = NAN;
    V(PREV_OUTT)   = NAN;
    V(STARTPTS)    = NAN;
    V(STARTT)      = NAN;
    V(T_CHANGE)    = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    V(TB)       = av_q2d(inlink->time_base);
    V(RTCSTART) = av_gettime();

    V(SR) = V(SAMPLE_RATE) =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    V(FRAME_RATE) = V(FR) =
        inlink->frame_rate.num && inlink->frame_rate.den ?
            av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           V(TB), V(FRAME_RATE), V(SAMPLE_RATE));
    return 0;
}

static int config_output_video(AVFilterLink *outlink)
{
    outlink->frame_rate = (AVRational){ 1, 0 };

    return 0;
}
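/* Note added for clarity (not in the original source): a frame rate of 1/0
 * advertises the output frame rate as unknown, since an arbitrary PTS
 * expression can turn constant-frame-rate input into variable-frame-rate
 * output. */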

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

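/* Background note (added to this listing, not in the original source): TS2D,
 * TS2T and D2TS come from libavfilter/internal.h. Roughly, TS2D(ts) converts
 * an int64_t timestamp to double (AV_NOPTS_VALUE becomes NAN), TS2T(ts, tb)
 * additionally multiplies by av_q2d(tb) to get seconds, and D2TS(d) converts
 * back, mapping NAN to AV_NOPTS_VALUE. */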
static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
{
    if (isnan(V(STARTPTS))) {
        V(STARTPTS) = TS2D(pts);
        V(STARTT  ) = TS2T(pts, inlink->time_base);
    }
    if (isnan(V(T_CHANGE))) {
        V(T_CHANGE) = TS2T(pts, inlink->time_base);
    }
    V(PTS       ) = TS2D(pts);
    V(T         ) = TS2T(pts, inlink->time_base);
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
    V(POS       ) = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    V(RTCTIME   ) = av_gettime();

    if (frame) {
        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
            V(INTERLACED) = !!(frame->flags & AV_FRAME_FLAG_INTERLACED);
        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
            V(S)          = frame->nb_samples;
            V(NB_SAMPLES) = frame->nb_samples;
        }
    }

    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
}
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
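/* Note added for clarity (not in the original source): the compound literal
 * (char[BUF_SIZE]){0} gives every d2istr() call its own temporary buffer, so
 * several d2istr() results can safely appear in a single av_log() call. */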

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    d = eval_pts(setpts, inlink, frame, frame->pts);
    frame->pts = D2TS(d);
    frame->duration = 0;

    av_log(inlink->dst, AV_LOG_TRACE,
            "N:%"PRId64" PTS:%s T:%f",
           (int64_t)V(N), d2istr(V(PTS)), V(T));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
                (int64_t)V(INTERLACED));
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
                (int64_t)V(NB_SAMPLES),
                (int64_t)V(NB_CONSUMED_SAMPLES));
        break;
    }
    av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        V(N) += 1.0;
    } else {
        V(N) += frame->nb_samples;
    }

    V(PREV_INPTS ) = TS2D(in_pts);
    V(PREV_INT   ) = TS2T(in_pts, inlink->time_base);
    V(PREV_OUTPTS) = TS2D(frame->pts);
    V(PREV_OUTT  ) = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        V(NB_CONSUMED_SAMPLES) += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
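/* Worked example (illustrative, not in the original source): with a 1/1000
 * time base and the expression "PTS-STARTPTS", input frames with pts 5000,
 * 5040, 5080 set STARTPTS = 5000 and produce output pts 0, 40, 80; the stream
 * is shifted to start at zero while the frame spacing is preserved. */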

static int activate(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int status;
    int64_t pts;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        double d = eval_pts(setpts, inlink, NULL, pts);

        av_log(ctx, AV_LOG_TRACE, "N:EOF PTS:%s T:%f -> PTS:%s T:%f\n",
               d2istr(V(PTS)), V(T), d2istr(d), TS2T(d, inlink->time_base));
        ff_outlink_set_status(outlink, status, D2TS(d));
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg,
                           char *res, int res_len, int flags)
{
    SetPTSContext *setpts = ctx->priv;
    AVExpr *new_expr;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);

    if (ret < 0)
        return ret;

    if (!strcmp(cmd, "expr")) {
        ret = av_expr_parse(&new_expr, arg, var_names, NULL, NULL, NULL, NULL, 0, ctx);
        // Only free and replace the previous expression if the new one parses;
        // otherwise defensively keep everything intact even while reporting an error.
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", arg);
        } else {
            av_expr_free(setpts->expr);
            setpts->expr = new_expr;
            V(T_CHANGE) = NAN;
        }
    } else {
        ret = AVERROR(EINVAL);
    }

    return ret;
}
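/* Usage note (illustrative, not in the original source): the "expr" command
 * lets callers swap the expression at runtime, for example through the
 * sendcmd/asendcmd filters; the exact command syntax is described in the
 * sendcmd documentation. Resetting T_CHANGE to NAN makes eval_pts() latch the
 * time of the first frame seen after the change, so the new expression can
 * reference T_CHANGE to splice timestamps without a jump. */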
#undef V

#define OFFSET(x) offsetof(SetPTSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define R AV_OPT_FLAG_RUNTIME_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

#if CONFIG_SETPTS_FILTER
static const AVOption setpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = V|F|R },
    { NULL }
};
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
};

static const AVFilterPad outputs_video[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output_video,
    },
};

const AVFilter ff_vf_setpts = {
    .name            = "setpts",
    .description     = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init            = init,
    .activate        = activate,
    .uninit          = uninit,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_METADATA_ONLY,

    .priv_size       = sizeof(SetPTSContext),
    .priv_class      = &setpts_class,

    FILTER_INPUTS(avfilter_vf_setpts_inputs),
    FILTER_OUTPUTS(outputs_video),
};
#endif /* CONFIG_SETPTS_FILTER */

#if CONFIG_ASETPTS_FILTER

static const AVOption asetpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = A|F|R },
    { NULL }
};
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

const AVFilter ff_af_asetpts = {
    .name            = "asetpts",
    .description     = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init            = init,
    .activate        = activate,
    .uninit          = uninit,
    .process_command = process_command,
    .priv_size       = sizeof(SetPTSContext),
    .priv_class      = &asetpts_class,
    .flags           = AVFILTER_FLAG_METADATA_ONLY,
    FILTER_INPUTS(asetpts_inputs),
    FILTER_OUTPUTS(ff_audio_default_filterpad),
};
#endif /* CONFIG_ASETPTS_FILTER */