FFmpeg
f_interleave.c
/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio and video interleaver
 */
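/*
 * The filters defined here merge the frames of several inputs into a single
 * output, ordered by ascending timestamp.  A sketch of typical usage, assuming
 * two video inputs and the documented filtergraph syntax:
 *
 *   ffmpeg -i in0.mkv -i in1.mkv \
 *          -filter_complex "[0:v][1:v]interleave=nb_inputs=2[v]" \
 *          -map "[v]" out.mkv
 */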
#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"
#include "video.h"
typedef struct InterleaveContext {
    const AVClass *class;
    int nb_inputs;
    int duration_mode;
    int64_t pts;
} InterleaveContext;

#define DURATION_LONGEST  0
#define DURATION_SHORTEST 1
#define DURATION_FIRST    2
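/*
 * The "duration" option selects when the output reaches end-of-stream:
 * DURATION_LONGEST  - keep going until every input has ended,
 * DURATION_SHORTEST - stop as soon as any input ends,
 * DURATION_FIRST    - stop when the first input ends.
 * See the EOF handling at the top of activate() below.
 */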
#define OFFSET(x) offsetof(InterleaveContext, x)

#define DEFINE_OPTIONS(filt_name, flags_)                           \
static const AVOption filt_name##_options[] = {                     \
    { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { "n",         "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { "duration", "how to determine the end-of-stream",             \
        OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, flags_, .unit = "duration" }, \
    { "longest",  "Duration of longest input",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, 0, 0, flags_, .unit = "duration" }, \
    { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, 0, 0, flags_, .unit = "duration" }, \
    { "first",    "Duration of first input",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, 0, 0, flags_, .unit = "duration" }, \
    { NULL }                                                         \
}
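/*
 * activate() implements the filter's scheduling: it acknowledges the status
 * of all input links, applies the selected end-of-stream policy, and, once
 * every still-active input has a frame queued, forwards the frame with the
 * smallest timestamp (rescaled to AV_TIME_BASE_Q) to the output.
 */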
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    InterleaveContext *s = ctx->priv;
    int64_t q_pts, pts = INT64_MAX;
    int i, nb_eofs = 0, input_idx = -1;
    int first_eof = 0;
    int64_t rpts;
    int status;
    int nb_inputs_with_frames = 0;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
    for (i = 0; i < ctx->nb_inputs; i++) {
        int is_eof = !!ff_inlink_acknowledge_status(ctx->inputs[i], &status, &rpts);

        nb_eofs += is_eof;
        if (i == 0)
            first_eof = is_eof;
    }

    if ((nb_eofs > 0 && s->duration_mode == DURATION_SHORTEST) ||
        (nb_eofs == ctx->nb_inputs && s->duration_mode == DURATION_LONGEST) ||
        (first_eof && s->duration_mode == DURATION_FIRST)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!ff_inlink_queued_frames(ctx->inputs[i]))
            continue;
        nb_inputs_with_frames++;
    }
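    /*
     * Dequeue only when every input that has not reached EOF has at least
     * one frame queued; among those, pick the frame with the smallest pts
     * once rescaled to the common AV_TIME_BASE_Q time base.
     */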
    if (nb_inputs_with_frames >= ctx->nb_inputs - nb_eofs) {
        for (i = 0; i < ctx->nb_inputs; i++) {
            AVFrame *frame;

            if (ff_inlink_queued_frames(ctx->inputs[i]) == 0)
                continue;

            frame = ff_inlink_peek_frame(ctx->inputs[i], 0);
            if (frame->pts == AV_NOPTS_VALUE) {
                int ret;

                av_log(ctx, AV_LOG_WARNING,
                       "NOPTS value for input frame cannot be accepted, frame discarded\n");
                ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
                if (ret < 0)
                    return ret;
                av_frame_free(&frame);
                return AVERROR_INVALIDDATA;
            }

            q_pts = av_rescale_q(frame->pts, ctx->inputs[i]->time_base, AV_TIME_BASE_Q);
            if (q_pts < pts) {
                pts = q_pts;
                input_idx = i;
            }
        }

        if (input_idx >= 0) {
            AVFrame *frame;
            int ret;

            ret = ff_inlink_consume_frame(ctx->inputs[input_idx], &frame);
            if (ret < 0)
                return ret;

            frame->pts = s->pts = pts;
            return ff_filter_frame(outlink, frame);
        }
    }
    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_frames(ctx->inputs[i]))
            continue;
        if (ff_outlink_frame_wanted(outlink) &&
            !ff_outlink_get_status(ctx->inputs[i])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    if (i == ctx->nb_inputs - nb_eofs && ff_outlink_frame_wanted(outlink)) {
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    return FFERROR_NOT_READY;
}
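/*
 * init() creates the configured number of input pads at runtime
 * (the filter is declared with AVFILTER_FLAG_DYNAMIC_INPUTS); each pad takes
 * the media type of the single output pad and uses the pass-through buffer
 * allocators.
 */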
static av_cold int init(AVFilterContext *ctx)
{
    InterleaveContext *s = ctx->priv;
    const AVFilterPad *outpad = &ctx->filter->outputs[0];
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad inpad = { 0 };

        inpad.name = av_asprintf("input%d", i);
        if (!inpad.name)
            return AVERROR(ENOMEM);
        inpad.type = outpad->type;

        switch (outpad->type) {
        case AVMEDIA_TYPE_VIDEO:
            inpad.get_buffer.video = ff_null_get_video_buffer; break;
        case AVMEDIA_TYPE_AUDIO:
            inpad.get_buffer.audio = ff_null_get_audio_buffer; break;
        default:
            av_assert0(0);
        }
        if ((ret = ff_append_inpad_free_name(ctx, &inpad)) < 0)
            return ret;
    }

    return 0;
}
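/*
 * config_output() switches the output link to AV_TIME_BASE_Q (the time base
 * frames are compared in within activate()) and, for video, checks that all
 * inputs share the dimensions and sample aspect ratio of the first input.
 */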
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = ctx->inputs[0];
    int i;

    if (outlink->type == AVMEDIA_TYPE_VIDEO) {
        outlink->time_base           = AV_TIME_BASE_Q;
        outlink->w                   = inlink0->w;
        outlink->h                   = inlink0->h;
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
        outlink->format              = inlink0->format;
        outlink->frame_rate          = (AVRational) {1, 0};
        for (i = 1; i < ctx->nb_inputs; i++) {
            AVFilterLink *inlink = ctx->inputs[i];

            if (outlink->w                       != inlink->w                       ||
                outlink->h                       != inlink->h                       ||
                outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
                outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
                av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
                       "(size %dx%d, SAR %d:%d) do not match the corresponding "
                       "output link parameters (%dx%d, SAR %d:%d)\n",
                       ctx->input_pads[i].name, inlink->w, inlink->h,
                       inlink->sample_aspect_ratio.num,
                       inlink->sample_aspect_ratio.den,
                       outlink->w, outlink->h,
                       outlink->sample_aspect_ratio.num,
                       outlink->sample_aspect_ratio.den);
                return AVERROR(EINVAL);
            }
        }
    }
    return 0;
}
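/*
 * The same implementation backs two filters: "interleave" (video) and
 * "ainterleave" (audio), each enabled by its own CONFIG_* define below.
 */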
#if CONFIG_INTERLEAVE_FILTER

DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);

static const AVFilterPad interleave_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_interleave = {
    .name        = "interleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .activate    = activate,
    FILTER_OUTPUTS(interleave_outputs),
    .priv_class  = &interleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif
#if CONFIG_AINTERLEAVE_FILTER

DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(ainterleave);

static const AVFilterPad ainterleave_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};

const AVFilter ff_af_ainterleave = {
    .name        = "ainterleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .activate    = activate,
    FILTER_OUTPUTS(ainterleave_outputs),
    .priv_class  = &ainterleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif