FFmpeg
avf_aphasemeter.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia aphasemeter filter
24  */
25 
26 #include <float.h>
27 
29 #include "libavutil/intreadwrite.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/parseutils.h"
32 #include "libavutil/timestamp.h"
33 #include "avfilter.h"
34 #include "filters.h"
35 #include "formats.h"
36 #include "audio.h"
37 #include "video.h"
38 #include "internal.h"
39 
/* Private context of the aphasemeter filter: option values plus the running
 * state of the mono / out-of-phase detection.
 * NOTE(review): the Doxygen extraction dropped several members that later code
 * references (e.g. in, out, do_phasing_detection, frame_rate, nb_samples,
 * time_base, draw_median_phase, is_out_phase, start_*_presence) as well as the
 * closing "} AudioPhaseMeterContext;" — confirm against upstream source. */
40 typedef struct AudioPhaseMeterContext {
41  const AVClass *class;
43  int64_t last_pts;
44  int do_video;
46  int w, h;
48  int contrast[4];
49  uint8_t *mpc_str;
50  uint8_t mpc[4];
52  int is_mono;
57  float tolerance;
58  float angle;
59  float phase;
61  int64_t duration;
62  int64_t frame_end;
63  int64_t mono_idx[2];
64  int64_t out_phase_idx[2];
66 
/* Cap for the "duration" option: 24 hours expressed in microseconds. */
67 #define MAX_DURATION (24*60*60*1000000LL)
68 #define OFFSET(x) offsetof(AudioPhaseMeterContext, x)
69 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Duration of an interval stored as a {start, end} index pair. */
70 #define get_duration(index) (index[1] - index[0])
71 
/* User-visible options: video geometry/rate and per-channel contrast for the
 * histogram output, plus the thresholds driving phasing detection
 * (tolerance for mono, angle for out-of-phase, minimum duration). */
72 static const AVOption aphasemeter_options[] = {
73  { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
74  { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
75  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
76  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="800x400"}, 0, 0, FLAGS },
77  { "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=2}, 0, 255, FLAGS },
78  { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=7}, 0, 255, FLAGS },
79  { "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=1}, 0, 255, FLAGS },
80  { "mpc", "set median phase color", OFFSET(mpc_str), AV_OPT_TYPE_STRING, {.str = "none"}, 0, 0, FLAGS },
81  { "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
82  { "phasing", "set mono and out-of-phase detection output", OFFSET(do_phasing_detection), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
83  { "tolerance", "set phase tolerance for mono detection", OFFSET(tolerance), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
84  { "t", "set phase tolerance for mono detection", OFFSET(tolerance), AV_OPT_TYPE_FLOAT, {.dbl = 0.}, 0, 1, FLAGS },
85  { "angle", "set angle threshold for out-of-phase detection", OFFSET(angle), AV_OPT_TYPE_FLOAT, {.dbl = 170.}, 90, 180, FLAGS },
86  { "a", "set angle threshold for out-of-phase detection", OFFSET(angle), AV_OPT_TYPE_FLOAT, {.dbl = 170.}, 90, 180, FLAGS },
87  { "duration", "set minimum mono or out-of-phase duration in seconds", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=2000000}, 0, MAX_DURATION, FLAGS },
88  { "d", "set minimum mono or out-of-phase duration in seconds", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=2000000}, 0, MAX_DURATION, FLAGS },
89  { NULL }
90 };
91 
92 AVFILTER_DEFINE_CLASS(aphasemeter);
93 
/* query_formats(): constrain the audio input/output to one format and pin the
 * optional second (video) output to RGBA.
 * NOTE(review): the extraction dropped the function signature and the lines
 * that create `formats` / `layout` (ff_make_format_list / ff_add_channel_layout
 * and the samplerate list) — confirm against upstream before relying on this. */
95 {
96  AudioPhaseMeterContext *s = ctx->priv;
99  AVFilterLink *inlink = ctx->inputs[0];
100  AVFilterLink *outlink = ctx->outputs[0];
102  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
103  int ret;
104 
/* Tie the same sample-format and channel-layout lists to both ends of the
 * audio link so the filter is effectively pass-through for audio. */
106  if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0 ||
107  (ret = ff_formats_ref (formats, &outlink->incfg.formats )) < 0 ||
109  (ret = ff_channel_layouts_ref (layout , &inlink->outcfg.channel_layouts)) < 0 ||
110  (ret = ff_channel_layouts_ref (layout , &outlink->incfg.channel_layouts)) < 0)
111  return ret;
112 
114  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
115  (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
116  return ret;
117 
/* The video pad only exists when the "video" option is enabled. */
118  if (s->do_video) {
119  AVFilterLink *outlink = ctx->outputs[1];
120 
122  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
123  return ret;
124  }
125 
126  return 0;
127 }
128 
/* config_input(): translate option values into sample-domain quantities once
 * the input sample rate is known.
 * NOTE(review): the signature line was lost in extraction; upstream is
 * `static int config_input(AVFilterLink *inlink)`. */
130 {
131  AVFilterContext *ctx = inlink->dst;
132  AudioPhaseMeterContext *s = ctx->priv;
/* Convert the minimum-duration option from microseconds to samples. */
133  s->duration = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
134 
/* One video frame is emitted per this many input samples (at least 1). */
135  if (s->do_video)
136  s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
137 
138  return 0;
139 }
140 
141 static int config_video_output(AVFilterLink *outlink)
142 {
143  AVFilterContext *ctx = outlink->src;
144  AudioPhaseMeterContext *s = ctx->priv;
145 
146  s->last_pts = AV_NOPTS_VALUE;
147 
148  outlink->w = s->w;
149  outlink->h = s->h;
150  outlink->sample_aspect_ratio = (AVRational){1,1};
151  outlink->frame_rate = s->frame_rate;
152  outlink->time_base = av_inv_q(outlink->frame_rate);
153 
154  if (!strcmp(s->mpc_str, "none"))
155  s->draw_median_phase = 0;
156  else if (av_parse_color(s->mpc, s->mpc_str, -1, ctx) >= 0)
157  s->draw_median_phase = 1;
158  else
159  return AVERROR(EINVAL);
160 
161  return 0;
162 }
163 
/**
 * Map a phase value in [-1, 1] to a horizontal pixel column in [0, w-1].
 * -1 lands on column 0, +1 on column w-1; the result is truncated toward
 * zero by the float-to-int conversion.
 */
static inline int get_x(float phase, int w)
{
    const float normalized = (phase + 1.0f) / 2.0f; /* [-1,1] -> [0,1] */
    return (int)(normalized * (w - 1.0f));
}
168 
169 static inline void add_metadata(AVFrame *insamples, const char *key, char *value)
170 {
171  char buf[128];
172 
173  snprintf(buf, sizeof(buf), "lavfi.aphasemeter.%s", key);
174  av_dict_set(&insamples->metadata, buf, value, 0);
175 }
176 
/* State machine tracking mono intervals. A mono interval is reported (as
 * frame metadata and log lines) only once it has lasted at least s->duration.
 * Called with insamples == NULL at EOF to close a pending interval. */
177 static inline void update_mono_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int mono_measurement)
178 {
179  int64_t mono_duration;
/* Transition not-mono -> mono: remember the interval start.
 * NOTE(review): insamples is dereferenced here, so the first call for a new
 * interval presumably never happens with insamples == NULL — confirm with
 * the uninit() caller. */
180  if (!s->is_mono && mono_measurement) {
181  s->is_mono = 1;
182  s->start_mono_presence = 1;
183  s->mono_idx[0] = insamples->pts;
184  }
/* Still mono and not yet reported: report "mono_start" once the interval
 * reaches the minimum duration, then clear the pending flag. */
185  if (s->is_mono && mono_measurement && s->start_mono_presence) {
186  s->mono_idx[1] = s->frame_end;
187  mono_duration = get_duration(s->mono_idx);
188  if (mono_duration >= s->duration) {
189  add_metadata(insamples, "mono_start", av_ts2timestr(s->mono_idx[0], &s->time_base));
190  av_log(s, AV_LOG_INFO, "mono_start: %s\n", av_ts2timestr(s->mono_idx[0], &s->time_base));
191  s->start_mono_presence = 0;
192  }
193  }
/* Transition mono -> not-mono (or EOF): report end and total duration if the
 * interval was long enough; metadata only when a frame is available. */
194  if (s->is_mono && !mono_measurement) {
195  s->mono_idx[1] = insamples ? insamples->pts : s->frame_end;
196  mono_duration = get_duration(s->mono_idx);
197  if (mono_duration >= s->duration) {
198  if (insamples) {
199  add_metadata(insamples, "mono_end", av_ts2timestr(s->mono_idx[1], &s->time_base));
200  add_metadata(insamples, "mono_duration", av_ts2timestr(mono_duration, &s->time_base));
201  }
202  av_log(s, AV_LOG_INFO, "mono_end: %s | mono_duration: %s\n", av_ts2timestr(s->mono_idx[1], &s->time_base), av_ts2timestr(mono_duration, &s->time_base));
203  }
204  s->is_mono = 0;
205  }
206 }
207 
/* State machine tracking out-of-phase intervals; mirrors
 * update_mono_detection() with the "out_phase" metadata keys. Called with
 * insamples == NULL at EOF to close a pending interval. */
208 static inline void update_out_phase_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int out_phase_measurement)
209 {
210  int64_t out_phase_duration;
/* Transition in-phase -> out-of-phase: remember the interval start. */
211  if (!s->is_out_phase && out_phase_measurement) {
212  s->is_out_phase = 1;
213  s->start_out_phase_presence = 1;
214  s->out_phase_idx[0] = insamples->pts;
215  }
/* Report "out_phase_start" once the interval reaches the minimum duration. */
216  if (s->is_out_phase && out_phase_measurement && s->start_out_phase_presence) {
217  s->out_phase_idx[1] = s->frame_end;
218  out_phase_duration = get_duration(s->out_phase_idx);
219  if (out_phase_duration >= s->duration) {
220  add_metadata(insamples, "out_phase_start", av_ts2timestr(s->out_phase_idx[0], &s->time_base));
221  av_log(s, AV_LOG_INFO, "out_phase_start: %s\n", av_ts2timestr(s->out_phase_idx[0], &s->time_base));
222  s->start_out_phase_presence = 0;
223  }
224  }
/* Interval ended (or EOF): report end and total duration if long enough. */
225  if (s->is_out_phase && !out_phase_measurement) {
226  s->out_phase_idx[1] = insamples ? insamples->pts : s->frame_end;
227  out_phase_duration = get_duration(s->out_phase_idx);
228  if (out_phase_duration >= s->duration) {
229  if (insamples) {
230  add_metadata(insamples, "out_phase_end", av_ts2timestr(s->out_phase_idx[1], &s->time_base));
231  add_metadata(insamples, "out_phase_duration", av_ts2timestr(out_phase_duration, &s->time_base));
232  }
233  av_log(s, AV_LOG_INFO, "out_phase_end: %s | out_phase_duration: %s\n", av_ts2timestr(s->out_phase_idx[1], &s->time_base), av_ts2timestr(out_phase_duration, &s->time_base));
234  }
235  s->is_out_phase = 0;
236  }
237 }
238 
/* filter_frame(): core per-frame processing. Computes the per-sample stereo
 * phase correlation, accumulates it into the scrolling video histogram,
 * attaches the mean phase as frame metadata, runs phasing detection, emits a
 * video frame when the output pts advances, and passes the audio through.
 * NOTE(review): the signature line was lost in extraction; upstream is
 * `static int filter_frame(AVFilterLink *inlink, AVFrame *in)`. */
240 {
241  AVFilterContext *ctx = inlink->dst;
242  AudioPhaseMeterContext *s = ctx->priv;
243  AVFilterLink *outlink = s->do_video ? ctx->outputs[1] : NULL;
244  AVFilterLink *aoutlink = ctx->outputs[0];
245  AVDictionary **metadata;
246  const int rc = s->contrast[0];
247  const int gc = s->contrast[1];
248  const int bc = s->contrast[2];
249  float fphase = 0;
250  AVFrame *out;
251  uint8_t *dst;
252  int i, ret;
253  int mono_measurement;
254  int out_phase_measurement;
/* Detection thresholds: phase above `tolerance` counts as mono; phase below
 * cos(angle) counts as out-of-phase. */
255  float tolerance = 1.0f - s->tolerance;
256  float angle = cosf(s->angle/180.0f*M_PIf);
257  int64_t new_pts;
258 
/* (Re)allocate the persistent video canvas when missing or resized, and
 * clear it to transparent black. */
259  if (s->do_video && (!s->out || s->out->width != outlink->w ||
260  s->out->height != outlink->h)) {
261  av_frame_free(&s->out);
262  s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
263  if (!s->out) {
264  ret = AVERROR(ENOMEM);
265  goto fail;
266  }
267 
268  out = s->out;
269  for (i = 0; i < outlink->h; i++)
270  memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4);
/* Canvas exists: scroll rows 10..h-1 down by one and clear the top row,
 * which will receive this frame's histogram. */
271  } else if (s->do_video) {
272  ret = ff_inlink_make_frame_writable(outlink, &s->out);
273  if (ret < 0)
274  goto fail;
275  out = s->out;
276  for (i = outlink->h - 1; i >= 10; i--)
277  memmove(out->data[0] + (i ) * out->linesize[0],
278  out->data[0] + (i-1) * out->linesize[0],
279  outlink->w * 4);
280  for (i = 0; i < outlink->w; i++)
281  AV_WL32(out->data[0] + i * 4, 0);
282  }
283 
/* Per sample pair: phase correlation 2*L*R/(L^2+R^2) in [-1,1]; silence
 * (0/0 -> NaN) is treated as perfectly in phase (+1). Each sample brightens
 * the pixel column that corresponds to its phase. */
284  for (i = 0; i < in->nb_samples; i++) {
285  const float *src = (float *)in->data[0] + i * 2;
286  const float f = src[0] * src[1] / (src[0]*src[0] + src[1] * src[1]) * 2;
287  const float phase = isnan(f) ? 1 : f;
288  const int x = get_x(phase, s->w);
289 
290  if (s->do_video) {
291  dst = out->data[0] + x * 4;
292  dst[0] = FFMIN(255, dst[0] + rc);
293  dst[1] = FFMIN(255, dst[1] + gc);
294  dst[2] = FFMIN(255, dst[2] + bc);
295  dst[3] = 255;
296  }
297  fphase += phase;
298  }
299  fphase /= in->nb_samples;
300  s->phase = fphase;
301 
302  if (s->do_video) {
/* Mark the mean (median-phase) column with the user-chosen color. */
303  if (s->draw_median_phase) {
304  dst = out->data[0] + get_x(fphase, s->w) * 4;
305  AV_WL32(dst, AV_RL32(s->mpc));
306  }
307 
/* Duplicate the fresh top row into rows 1..9 so the newest measurement is a
 * 10-pixel-tall band. */
308  for (i = 1; i < 10 && i < outlink->h; i++)
309  memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
310  }
311 
/* NOTE(review): `metadata` is the address of a struct member, so this check
 * is always true — harmless, but the condition is vestigial. */
312  metadata = &in->metadata;
313  if (metadata) {
314  uint8_t value[128];
315 
316  snprintf(value, sizeof(value), "%f", fphase);
317  add_metadata(in, "phase", value);
318  }
319 
320  if (s->do_phasing_detection) {
321  s->time_base = inlink->time_base;
322  s->frame_end = in->pts + av_rescale_q(in->nb_samples,
323  (AVRational){ 1, in->sample_rate }, inlink->time_base);
324 
325  mono_measurement = (tolerance - fphase) < FLT_EPSILON;
326  out_phase_measurement = (angle - fphase) > FLT_EPSILON;
327 
328  update_mono_detection(s, in, mono_measurement);
329  update_out_phase_detection(s, in, out_phase_measurement);
330  }
331 
/* Emit a video frame only when the rescaled pts actually advances; new_pts
 * is intentionally only assigned (and read) under do_video. */
332  if (s->do_video)
333  new_pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base);
334  if (s->do_video && new_pts != s->last_pts) {
335  AVFrame *clone;
336 
337  s->out->pts = s->last_pts = new_pts;
338  s->out->duration = 1;
339 
/* Clone so the filter keeps ownership of the persistent canvas. */
340  clone = av_frame_clone(s->out);
341  if (!clone) {
342  ret = AVERROR(ENOMEM);
343  goto fail;
344  }
345  ret = ff_filter_frame(outlink, clone);
346  if (ret < 0)
347  goto fail;
348  }
/* Audio is passed through untouched (apart from the added metadata). */
349  s->in = NULL;
350  return ff_filter_frame(aoutlink, in);
351 fail:
352  av_frame_free(&in);
353  s->in = NULL;
354  return ret;
355 }
356 
/* activate(): pull audio from the input in nb_samples-sized chunks (so video
 * frames align with the requested rate) and forward status/wanted flags.
 * NOTE(review): the extraction dropped the signature, the statement guarded
 * by the first `if (s->do_video)`, the `else` branch consuming a whole frame
 * (presumably ff_inlink_consume_frame), and the status/forwarding lines
 * between the consume block and the final forward — confirm upstream. */
358 {
359  AVFilterLink *inlink = ctx->inputs[0];
360  AVFilterLink *outlink = ctx->outputs[0];
361  AudioPhaseMeterContext *s = ctx->priv;
362  int ret;
363 
365  if (s->do_video)
367 
368  if (!s->in) {
369  if (s->nb_samples > 0)
370  ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &s->in);
371  else
373  if (ret < 0)
374  return ret;
/* A frame was consumed: process it immediately. */
375  if (ret > 0)
376  return filter_frame(inlink, s->in);
377  }
378 
381  if (s->do_video)
382  FF_FILTER_FORWARD_WANTED(ctx->outputs[1], inlink);
383 
384  return FFERROR_NOT_READY;
385 }
386 
/* uninit(): flush pending phasing-detection intervals and drop the canvas.
 * NOTE(review): the signature and the body of the do_phasing_detection block
 * (presumably the EOF calls to update_mono_detection / 
 * update_out_phase_detection with a NULL frame) were lost in extraction. */
388 {
389  AudioPhaseMeterContext *s = ctx->priv;
390 
391  if (s->do_phasing_detection) {
394  }
395  av_frame_free(&s->out);
396 }
397 
/* init(): create the output pads at runtime — always one audio pad, plus a
 * video pad when the "video" option is enabled (the filter is declared with
 * AVFILTER_FLAG_DYNAMIC_OUTPUTS). NOTE(review): the signature line was lost
 * in extraction; upstream is `static av_cold int init(AVFilterContext *ctx)`. */
399 {
400  AudioPhaseMeterContext *s = ctx->priv;
401  AVFilterPad pad;
402  int ret;
403 
/* Audio pass-through pad, always present. */
404  pad = (AVFilterPad){
405  .name = "out0",
406  .type = AVMEDIA_TYPE_AUDIO,
407  };
408  ret = ff_append_outpad(ctx, &pad);
409  if (ret < 0)
410  return ret;
411 
/* Optional phase-meter video pad. */
412  if (s->do_video) {
413  pad = (AVFilterPad){
414  .name = "out1",
415  .type = AVMEDIA_TYPE_VIDEO,
416  .config_props = config_video_output,
417  };
418  ret = ff_append_outpad(ctx, &pad);
419  if (ret < 0)
420  return ret;
421  }
422 
423  return 0;
424 }
425 
/* Single audio input; config_input converts option values to sample units
 * once the sample rate is known. */
426 static const AVFilterPad inputs[] = {
427  {
428  .name = "default",
429  .type = AVMEDIA_TYPE_AUDIO,
430  .config_props = config_input,
431  },
432 };
433 
/* Filter registration. Outputs are created dynamically in init(), hence
 * .outputs = NULL. NOTE(review): the opening
 * `const AVFilter ff_avf_aphasemeter = {` line and several initializers
 * (priv_class ordering, FILTER_INPUTS/FILTER_QUERY_FUNC, flags with
 * AVFILTER_FLAG_DYNAMIC_OUTPUTS) were lost in extraction — confirm upstream. */
435  .name = "aphasemeter",
436  .description = NULL_IF_CONFIG_SMALL("Convert input audio to phase meter video output."),
437  .init = init,
438  .uninit = uninit,
439  .priv_size = sizeof(AudioPhaseMeterContext),
441  .activate = activate,
442  .outputs = NULL,
444  .priv_class = &aphasemeter_class,
446 };
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
inputs
static const AVFilterPad inputs[]
Definition: avf_aphasemeter.c:426
config_video_output
static int config_video_output(AVFilterLink *outlink)
Definition: avf_aphasemeter.c:141
AudioPhaseMeterContext::start_mono_presence
int start_mono_presence
Definition: avf_aphasemeter.c:54
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AudioPhaseMeterContext::duration
int64_t duration
Definition: avf_aphasemeter.c:61
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AVFilterFormatsConfig::samplerates
AVFilterFormats * samplerates
Lists of supported sample rates, only for audio.
Definition: avfilter.h:515
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:436
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:424
AVFilterFormatsConfig::channel_layouts
AVFilterChannelLayouts * channel_layouts
Lists of supported channel layouts, only for audio.
Definition: avfilter.h:520
out
FILE * out
Definition: movenc.c:55
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:379
init
static av_cold int init(AVFilterContext *ctx)
Definition: avf_aphasemeter.c:398
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:674
av_parse_color
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:356
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:248
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
config_input
static int config_input(AVFilterLink *inlink)
Definition: avf_aphasemeter.c:129
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:486
AudioPhaseMeterContext::in
AVFrame * in
Definition: avf_aphasemeter.c:42
w
uint8_t w
Definition: llviddspenc.c:38
AudioPhaseMeterContext::w
int w
Definition: avf_aphasemeter.c:46
AVOption
AVOption.
Definition: opt.h:346
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
AV_OPT_TYPE_DURATION
@ AV_OPT_TYPE_DURATION
Definition: opt.h:249
FLAGS
#define FLAGS
Definition: avf_aphasemeter.c:69
float.h
AVDictionary
Definition: dict.c:34
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
video.h
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
AudioPhaseMeterContext::nb_samples
int nb_samples
Definition: avf_aphasemeter.c:56
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1442
AudioPhaseMeterContext::start_out_phase_presence
int start_out_phase_presence
Definition: avf_aphasemeter.c:55
cosf
#define cosf(x)
Definition: libm.h:78
fail
#define fail()
Definition: checkasm.h:179
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
AudioPhaseMeterContext::is_out_phase
int is_out_phase
Definition: avf_aphasemeter.c:53
update_out_phase_detection
static void update_out_phase_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int out_phase_measurement)
Definition: avf_aphasemeter.c:208
av_cold
#define av_cold
Definition: attributes.h:90
duration
int64_t duration
Definition: movenc.c:65
AudioPhaseMeterContext::out
AVFrame * out
Definition: avf_aphasemeter.c:42
intreadwrite.h
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: avf_aphasemeter.c:239
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:679
AudioPhaseMeterContext::frame_rate
AVRational frame_rate
Definition: avf_aphasemeter.c:47
AudioPhaseMeterContext::mono_idx
int64_t mono_idx[2]
Definition: avf_aphasemeter.c:63
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: avf_aphasemeter.c:94
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:593
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
key
const char * key
Definition: hwcontext_opencl.c:189
AudioPhaseMeterContext::h
int h
Definition: avf_aphasemeter.c:46
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
ff_inlink_make_frame_writable
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
Make sure a frame is writable.
Definition: avfilter.c:1489
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1462
NULL
#define NULL
Definition: coverity.c:32
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
aphasemeter_options
static const AVOption aphasemeter_options[]
Definition: avf_aphasemeter.c:72
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:245
parseutils.h
AudioPhaseMeterContext::do_phasing_detection
int do_phasing_detection
Definition: avf_aphasemeter.c:45
update_mono_detection
static void update_mono_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int mono_measurement)
Definition: avf_aphasemeter.c:177
AudioPhaseMeterContext::frame_end
int64_t frame_end
Definition: avf_aphasemeter.c:62
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, const AVChannelLayout *channel_layout)
Definition: formats.c:522
AudioPhaseMeterContext::do_video
int do_video
Definition: avf_aphasemeter.c:44
AudioPhaseMeterContext
Definition: avf_aphasemeter.c:40
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aphasemeter)
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
MAX_DURATION
#define MAX_DURATION
Definition: avf_aphasemeter.c:67
get_x
static int get_x(float phase, int w)
Definition: avf_aphasemeter.c:164
f
f
Definition: af_crystalizer.c:121
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
AudioPhaseMeterContext::last_pts
int64_t last_pts
Definition: avf_aphasemeter.c:43
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
AudioPhaseMeterContext::mpc_str
uint8_t * mpc_str
Definition: avf_aphasemeter.c:49
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AudioPhaseMeterContext::is_mono
int is_mono
Definition: avf_aphasemeter.c:52
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
OFFSET
#define OFFSET(x)
Definition: avf_aphasemeter.c:68
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
AudioPhaseMeterContext::phase
float phase
Definition: avf_aphasemeter.c:59
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:454
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
M_PIf
#define M_PIf
Definition: mathematics.h:70
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ff_avf_aphasemeter
const AVFilter ff_avf_aphasemeter
Definition: avf_aphasemeter.c:434
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AudioPhaseMeterContext::mpc
uint8_t mpc[4]
Definition: avf_aphasemeter.c:50
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
AudioPhaseMeterContext::contrast
int contrast[4]
Definition: avf_aphasemeter.c:48
AudioPhaseMeterContext::time_base
AVRational time_base
Definition: avf_aphasemeter.c:60
AVFilter
Filter definition.
Definition: avfilter.h:166
activate
static int activate(AVFilterContext *ctx)
Definition: avf_aphasemeter.c:357
ret
ret
Definition: filter_design.txt:187
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: avf_aphasemeter.c:387
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:607
channel_layout.h
AudioPhaseMeterContext::draw_median_phase
int draw_median_phase
Definition: avf_aphasemeter.c:51
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:692
add_metadata
static void add_metadata(AVFrame *insamples, const char *key, char *value)
Definition: avf_aphasemeter.c:169
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
get_duration
#define get_duration(index)
Definition: avf_aphasemeter.c:70
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:510
AudioPhaseMeterContext::tolerance
float tolerance
Definition: avf_aphasemeter.c:57
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:138
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
FF_FILTER_FORWARD_STATUS_ALL
#define FF_FILTER_FORWARD_STATUS_ALL(inlink, filter)
Acknowledge the status on an input link and forward it to an output link.
Definition: filters.h:239
timestamp.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AudioPhaseMeterContext::angle
float angle
Definition: avf_aphasemeter.c:58
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
snprintf
#define snprintf
Definition: snprintf.h:34
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:60
AudioPhaseMeterContext::out_phase_idx
int64_t out_phase_idx[2]
Definition: avf_aphasemeter.c:64