FFmpeg
avf_showwaves.c
1 /*
2  * Copyright (c) 2012 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia filter: draws the input audio as a video waveform
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/avstring.h"
28 #include "libavutil/channel_layout.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/parseutils.h"
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "audio.h"
35 #include "video.h"
36 #include "internal.h"
37 
38 enum ShowWavesMode {
39  MODE_POINT,
40  MODE_LINE,
41  MODE_P2P,
42  MODE_CENTERED_LINE,
43  MODE_NB,
44 };
45 
46 enum ShowWavesScale {
47  SCALE_LIN,
48  SCALE_LOG,
49  SCALE_SQRT,
50  SCALE_CBRT,
51  SCALE_NB,
52 };
53 
54 enum ShowWavesDrawMode {
55  DRAW_SCALE,
56  DRAW_FULL,
57  DRAW_NB,
58 };
59 
60 enum ShowWavesFilterMode {
61  FILTER_AVERAGE,
62  FILTER_PEAK,
63  FILTER_NB,
64 };
65 
66 struct frame_node {
67  AVFrame *frame;
68  struct frame_node *next;
69 };
70 
71 typedef struct ShowWavesContext {
72  const AVClass *class;
73  int w, h;
74  AVRational rate;
75  char *colors;
76  int buf_idx;
77  int16_t *buf_idy; /* y coordinate of previous sample for each channel */
78  AVFrame *outpicref;
79  int n;
80  int pixstep;
81  int sample_count_mod;
82  int mode; ///< ShowWavesMode
83  int scale; ///< ShowWavesScale
84  int draw_mode; ///< ShowWavesDrawMode
85  int split_channels;
86  int filter_mode;
87  uint8_t *fg;
88 
89  int (*get_h)(int16_t sample, int height);
90  void (*draw_sample)(uint8_t *buf, int height, int linesize,
91  int16_t *prev_y, const uint8_t color[4], int h);
92 
93  /* single picture */
94  int single_pic;
95  struct frame_node *audio_frames;
96  struct frame_node *last_frame;
97  int64_t total_samples;
98  int64_t *sum; /* abs sum of the samples per channel */
99 } ShowWavesContext;
100 
101 #define OFFSET(x) offsetof(ShowWavesContext, x)
102 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
103 
104 static const AVOption showwaves_options[] = {
105  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
106  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
107  { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
108  { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
109  { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
110  { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
111  { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
112  { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
113  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
114  { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
115  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
116  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
117  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
118  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
119  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
120  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
121  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
122  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
123  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
124  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
125  { NULL }
126 };
127 
128 AVFILTER_DEFINE_CLASS(showwaves);
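The tables above are plain AVOptions, so the same options can be given in a filtergraph string ("showwaves=s=1280x240:mode=cline") or set programmatically. A minimal sketch of the programmatic route, assuming the public libavfilter/libavutil API; the helper name create_showwaves and the option values are illustrative, not part of this file:

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>

/* Illustrative helper: instantiate and configure "showwaves" inside an
 * existing, caller-owned AVFilterGraph (error handling kept minimal). */
static int create_showwaves(AVFilterGraph *graph, AVFilterContext **out_ctx)
{
    const AVFilter *f = avfilter_get_by_name("showwaves");
    AVFilterContext *fctx;
    int ret;

    if (!f)
        return AVERROR_FILTER_NOT_FOUND;
    fctx = avfilter_graph_alloc_filter(graph, f, "waves");
    if (!fctx)
        return AVERROR(ENOMEM);

    /* Option names and named constants exactly as declared in showwaves_options[]. */
    av_opt_set(fctx, "size",  "1280x240", AV_OPT_SEARCH_CHILDREN);
    av_opt_set(fctx, "mode",  "cline",    AV_OPT_SEARCH_CHILDREN);
    av_opt_set(fctx, "scale", "sqrt",     AV_OPT_SEARCH_CHILDREN);

    ret = avfilter_init_str(fctx, NULL);
    if (ret < 0)
        return ret;
    *out_ctx = fctx;
    return 0;
}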
129 
130 static av_cold void uninit(AVFilterContext *ctx)
131 {
132  ShowWavesContext *showwaves = ctx->priv;
133 
134  av_frame_free(&showwaves->outpicref);
135  av_freep(&showwaves->buf_idy);
136  av_freep(&showwaves->fg);
137 
138  if (showwaves->single_pic) {
139  struct frame_node *node = showwaves->audio_frames;
140  while (node) {
141  struct frame_node *tmp = node;
142 
143  node = node->next;
144  av_frame_free(&tmp->frame);
145  av_freep(&tmp);
146  }
147  av_freep(&showwaves->sum);
148  showwaves->last_frame = NULL;
149  }
150 }
151 
152 static int query_formats(AVFilterContext *ctx)
153 {
154  AVFilterFormats *formats = NULL;
155  AVFilterChannelLayouts *layouts = NULL;
156  AVFilterLink *inlink = ctx->inputs[0];
157  AVFilterLink *outlink = ctx->outputs[0];
158  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
159  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
160  int ret;
161 
162  /* set input audio formats */
163  formats = ff_make_format_list(sample_fmts);
164  if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
165  return ret;
166 
167  layouts = ff_all_channel_layouts();
168  if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
169  return ret;
170 
171  formats = ff_all_samplerates();
172  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
173  return ret;
174 
175  /* set output video format */
176  formats = ff_make_format_list(pix_fmts);
177  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
178  return ret;
179 
180  return 0;
181 }
182 
183 static int get_lin_h(int16_t sample, int height)
184 {
185  return height/2 - av_rescale(sample, height/2, INT16_MAX);
186 }
187 
188 static int get_lin_h2(int16_t sample, int height)
189 {
190  return av_rescale(FFABS(sample), height, INT16_MAX);
191 }
192 
193 static int get_log_h(int16_t sample, int height)
194 {
195  return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
196 }
197 
198 static int get_log_h2(int16_t sample, int height)
199 {
200  return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
201 }
202 
203 static int get_sqrt_h(int16_t sample, int height)
204 {
205  return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
206 }
207 
208 static int get_sqrt_h2(int16_t sample, int height)
209 {
210  return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
211 }
212 
213 static int get_cbrt_h(int16_t sample, int height)
214 {
215  return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
216 }
217 
218 static int get_cbrt_h2(int16_t sample, int height)
219 {
220  return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
221 }
222 
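The get_*_h() and get_*_h2() helpers above are pure functions of a signed 16-bit sample and the drawing height, so they are easy to inspect in isolation. A standalone sketch, assuming a 240-pixel-high output; SIGN() stands in for FFSIGN and plain libc calls replace the FFmpeg wrappers:

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIGN(a) ((a) > 0 ? 1 : ((a) < 0 ? -1 : 0))

static int lin_h(int16_t s, int height)  { return height/2 - (int)lround((double)s * (height/2) / INT16_MAX); }
static int log_h(int16_t s, int height)  { return height/2 - SIGN(s) * (int)(log10(1 + abs(s)) * (height/2) / log10(1 + INT16_MAX)); }
static int sqrt_h(int16_t s, int height) { return height/2 - SIGN(s) * (int)(sqrt(abs(s)) * (height/2) / sqrt(INT16_MAX)); }
static int cbrt_h(int16_t s, int height) { return height/2 - SIGN(s) * (int)(cbrt(abs(s)) * (height/2) / cbrt(INT16_MAX)); }

int main(void)
{
    const int16_t samples[] = { -32767, -8192, 0, 8192, 32767 };
    const int height = 240;   /* default output height */

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("s=%6d  lin=%3d  log=%3d  sqrt=%3d  cbrt=%3d\n",
               samples[i], lin_h(samples[i], height), log_h(samples[i], height),
               sqrt_h(samples[i], height), cbrt_h(samples[i], height));
    return 0;
}

Silence maps to height/2 and full-scale positive samples to 0 (the top row); full-scale negative samples map to height, a value the drawing callbacks below either skip or clip to height-1.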
223 static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
224  int16_t *prev_y,
225  const uint8_t color[4], int h)
226 {
227  if (h >= 0 && h < height) {
228  buf[h * linesize + 0] += color[0];
229  buf[h * linesize + 1] += color[1];
230  buf[h * linesize + 2] += color[2];
231  buf[h * linesize + 3] += color[3];
232  }
233 }
234 
235 static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
236  int16_t *prev_y,
237  const uint8_t color[4], int h)
238 {
239  if (h >= 0 && h < height) {
240  buf[h * linesize + 0] = color[0];
241  buf[h * linesize + 1] = color[1];
242  buf[h * linesize + 2] = color[2];
243  buf[h * linesize + 3] = color[3];
244  }
245 }
246 
247 static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
248  int16_t *prev_y,
249  const uint8_t color[4], int h)
250 {
251  int k;
252  int start = height/2;
253  int end = av_clip(h, 0, height-1);
254  if (start > end)
255  FFSWAP(int16_t, start, end);
256  for (k = start; k < end; k++) {
257  buf[k * linesize + 0] += color[0];
258  buf[k * linesize + 1] += color[1];
259  buf[k * linesize + 2] += color[2];
260  buf[k * linesize + 3] += color[3];
261  }
262 }
263 
264 static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
265  int16_t *prev_y,
266  const uint8_t color[4], int h)
267 {
268  int k;
269  int start = height/2;
270  int end = av_clip(h, 0, height-1);
271  if (start > end)
272  FFSWAP(int16_t, start, end);
273  for (k = start; k < end; k++) {
274  buf[k * linesize + 0] = color[0];
275  buf[k * linesize + 1] = color[1];
276  buf[k * linesize + 2] = color[2];
277  buf[k * linesize + 3] = color[3];
278  }
279 }
280 
281 static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
282  int16_t *prev_y,
283  const uint8_t color[4], int h)
284 {
285  int k;
286  if (h >= 0 && h < height) {
287  buf[h * linesize + 0] += color[0];
288  buf[h * linesize + 1] += color[1];
289  buf[h * linesize + 2] += color[2];
290  buf[h * linesize + 3] += color[3];
291  if (*prev_y && h != *prev_y) {
292  int start = *prev_y;
293  int end = av_clip(h, 0, height-1);
294  if (start > end)
295  FFSWAP(int16_t, start, end);
296  for (k = start + 1; k < end; k++) {
297  buf[k * linesize + 0] += color[0];
298  buf[k * linesize + 1] += color[1];
299  buf[k * linesize + 2] += color[2];
300  buf[k * linesize + 3] += color[3];
301  }
302  }
303  }
304  *prev_y = h;
305 }
306 
307 static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
308  int16_t *prev_y,
309  const uint8_t color[4], int h)
310 {
311  int k;
312  if (h >= 0 && h < height) {
313  buf[h * linesize + 0] = color[0];
314  buf[h * linesize + 1] = color[1];
315  buf[h * linesize + 2] = color[2];
316  buf[h * linesize + 3] = color[3];
317  if (*prev_y && h != *prev_y) {
318  int start = *prev_y;
319  int end = av_clip(h, 0, height-1);
320  if (start > end)
321  FFSWAP(int16_t, start, end);
322  for (k = start + 1; k < end; k++) {
323  buf[k * linesize + 0] = color[0];
324  buf[k * linesize + 1] = color[1];
325  buf[k * linesize + 2] = color[2];
326  buf[k * linesize + 3] = color[3];
327  }
328  }
329  }
330  *prev_y = h;
331 }
332 
333 static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
334  int16_t *prev_y,
335  const uint8_t color[4], int h)
336 {
337  int k;
338  const int start = (height - h) / 2;
339  const int end = start + h;
340  for (k = start; k < end; k++) {
341  buf[k * linesize + 0] += color[0];
342  buf[k * linesize + 1] += color[1];
343  buf[k * linesize + 2] += color[2];
344  buf[k * linesize + 3] += color[3];
345  }
346 }
347 static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
348  int16_t *prev_y,
349  const uint8_t color[4], int h)
350 {
351  int k;
352  const int start = (height - h) / 2;
353  const int end = start + h;
354  for (k = start; k < end; k++) {
355  buf[k * linesize + 0] = color[0];
356  buf[k * linesize + 1] = color[1];
357  buf[k * linesize + 2] = color[2];
358  buf[k * linesize + 3] = color[3];
359  }
360 }
361 
362 static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
363  int16_t *prev_y,
364  const uint8_t color[4], int h)
365 {
366  if (h >= 0 && h < height)
367  buf[h * linesize] += color[0];
368 }
369 
370 static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
371  int16_t *prev_y,
372  const uint8_t color[4], int h)
373 {
374  int k;
375  int start = height/2;
376  int end = av_clip(h, 0, height-1);
377  if (start > end)
378  FFSWAP(int16_t, start, end);
379  for (k = start; k < end; k++)
380  buf[k * linesize] += color[0];
381 }
382 
383 static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
384  int16_t *prev_y,
385  const uint8_t color[4], int h)
386 {
387  int k;
388  if (h >= 0 && h < height) {
389  buf[h * linesize] += color[0];
390  if (*prev_y && h != *prev_y) {
391  int start = *prev_y;
392  int end = av_clip(h, 0, height-1);
393  if (start > end)
394  FFSWAP(int16_t, start, end);
395  for (k = start + 1; k < end; k++)
396  buf[k * linesize] += color[0];
397  }
398  }
399  *prev_y = h;
400 }
401 
402 static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
403  int16_t *prev_y,
404  const uint8_t color[4], int h)
405 {
406  int k;
407  const int start = (height - h) / 2;
408  const int end = start + h;
409  for (k = start; k < end; k++)
410  buf[k * linesize] += color[0];
411 }
412 
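The *_scale drawing callbacks above accumulate ("+=") a pre-attenuated foreground color so that the several samples folded into one column, and the overlaid channels, cannot overflow the 8-bit components, while the *_full variants simply overwrite ("=") with the full-intensity color. A tiny worked example of the attenuation factor that config_output() computes below; the channel count, n and the 255 component are assumed example values:

#include <stdio.h>

int main(void)
{
    /* Assumed: 2 channels drawn overlaid, 3 samples folded into each column. */
    int nb_channels = 2, n = 3, split_channels = 0;

    int x  = 255 / ((split_channels ? 1 : nb_channels) * n);  /* 42: per-contribution budget */
    int fg = 255 * x / 255;                                   /* a 255 color component becomes 42 */

    printf("per-sample contribution %d, worst-case accumulated value %d\n",
           fg, fg * nb_channels * n);                         /* prints 42 and 252 (<= 255) */
    return 0;
}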
413 static int config_output(AVFilterLink *outlink)
414 {
415  AVFilterContext *ctx = outlink->src;
416  AVFilterLink *inlink = ctx->inputs[0];
417  ShowWavesContext *showwaves = ctx->priv;
418  int nb_channels = inlink->channels;
419  char *colors, *saveptr = NULL;
420  uint8_t x;
421  int ch;
422 
423  if (showwaves->single_pic)
424  showwaves->n = 1;
425 
426  if (!showwaves->n)
427  showwaves->n = FFMAX(1, av_rescale_q(inlink->sample_rate, av_make_q(1, showwaves->w), showwaves->rate));
428 
429  showwaves->buf_idx = 0;
430  if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
431  av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
432  return AVERROR(ENOMEM);
433  }
434  outlink->w = showwaves->w;
435  outlink->h = showwaves->h;
436  outlink->sample_aspect_ratio = (AVRational){1,1};
437 
438  outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
439  (AVRational){showwaves->w,1});
440 
441  av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
442  showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
443 
444  switch (outlink->format) {
445  case AV_PIX_FMT_GRAY8:
446  switch (showwaves->mode) {
447  case MODE_POINT: showwaves->draw_sample = draw_sample_point_gray; break;
448  case MODE_LINE: showwaves->draw_sample = draw_sample_line_gray; break;
449  case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_gray; break;
450  case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
451  default:
452  return AVERROR_BUG;
453  }
454  showwaves->pixstep = 1;
455  break;
456  case AV_PIX_FMT_RGBA:
457  switch (showwaves->mode) {
458  case MODE_POINT: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
459  case MODE_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale : draw_sample_line_rgba_full; break;
460  case MODE_P2P: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale : draw_sample_p2p_rgba_full; break;
461  case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
462  default:
463  return AVERROR_BUG;
464  }
465  showwaves->pixstep = 4;
466  break;
467  }
468 
469  switch (showwaves->scale) {
470  case SCALE_LIN:
471  switch (showwaves->mode) {
472  case MODE_POINT:
473  case MODE_LINE:
474  case MODE_P2P: showwaves->get_h = get_lin_h; break;
475  case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
476  default:
477  return AVERROR_BUG;
478  }
479  break;
480  case SCALE_LOG:
481  switch (showwaves->mode) {
482  case MODE_POINT:
483  case MODE_LINE:
484  case MODE_P2P: showwaves->get_h = get_log_h; break;
485  case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
486  default:
487  return AVERROR_BUG;
488  }
489  break;
490  case SCALE_SQRT:
491  switch (showwaves->mode) {
492  case MODE_POINT:
493  case MODE_LINE:
494  case MODE_P2P: showwaves->get_h = get_sqrt_h; break;
495  case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
496  default:
497  return AVERROR_BUG;
498  }
499  break;
500  case SCALE_CBRT:
501  switch (showwaves->mode) {
502  case MODE_POINT:
503  case MODE_LINE:
504  case MODE_P2P: showwaves->get_h = get_cbrt_h; break;
505  case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
506  default:
507  return AVERROR_BUG;
508  }
509  break;
510  }
511 
512  showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
513  if (!showwaves->fg)
514  return AVERROR(ENOMEM);
515 
516  colors = av_strdup(showwaves->colors);
517  if (!colors)
518  return AVERROR(ENOMEM);
519 
520  if (showwaves->draw_mode == DRAW_SCALE) {
521  /* multiplication factor, pre-computed to avoid in-loop divisions */
522  x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
523  } else {
524  x = 255;
525  }
526  if (outlink->format == AV_PIX_FMT_RGBA) {
527  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
528 
529  for (ch = 0; ch < nb_channels; ch++) {
530  char *color;
531 
532  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
533  if (color)
534  av_parse_color(fg, color, -1, ctx);
535  showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
536  showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
537  showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
538  showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
539  }
540  } else {
541  for (ch = 0; ch < nb_channels; ch++)
542  showwaves->fg[4 * ch + 0] = x;
543  }
544  av_free(colors);
545 
546  return 0;
547 }
548 
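With the default options (s=600x240, r=25) and a 44100 Hz input, config_output() above folds n = round(44100 / (600 * 25)) = 3 samples into each output column and then reports frame_rate = (44100 / 3) / 600 = 24.5 fps in the verbose log. A quick sketch of the same arithmetic; the input sample rate is an assumed value:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const int    sample_rate = 44100;   /* assumed input sample rate */
    const int    w           = 600;     /* default output width      */
    const double rate        = 25.0;    /* default "rate"/"r" option */

    int n = (int)llround(sample_rate / (w * rate));   /* samples per column */
    if (n < 1)
        n = 1;

    printf("n = %d, frame rate = %d/%d = %.2f fps\n",
           n, sample_rate, n * w, sample_rate / (double)(n * w));
    return 0;
}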
549 inline static int push_frame(AVFilterLink *outlink)
550 {
551  AVFilterContext *ctx = outlink->src;
552  AVFilterLink *inlink = ctx->inputs[0];
553  ShowWavesContext *showwaves = outlink->src->priv;
554  int nb_channels = inlink->channels;
555  int ret, i;
556 
557  ret = ff_filter_frame(outlink, showwaves->outpicref);
558  showwaves->outpicref = NULL;
559  showwaves->buf_idx = 0;
560  for (i = 0; i < nb_channels; i++)
561  showwaves->buf_idy[i] = 0;
562  return ret;
563 }
564 
565 static int push_single_pic(AVFilterLink *outlink)
566 {
567  AVFilterContext *ctx = outlink->src;
568  AVFilterLink *inlink = ctx->inputs[0];
569  ShowWavesContext *showwaves = ctx->priv;
570  int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
571  int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
572  int64_t last_column_samples = column_max_samples + remaining_samples;
573  AVFrame *out = showwaves->outpicref;
574  struct frame_node *node;
575  const int nb_channels = inlink->channels;
576  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
577  const int linesize = out->linesize[0];
578  const int pixstep = showwaves->pixstep;
579  int col = 0;
580  int64_t *sum = showwaves->sum;
581 
582  if (column_max_samples == 0) {
583  av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
584  return AVERROR(EINVAL);
585  }
586 
587  av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
588 
589  memset(sum, 0, nb_channels * sizeof(*sum)); /* clear all per-channel int64_t accumulators */
590 
591  for (node = showwaves->audio_frames; node; node = node->next) {
592  int i;
593  const AVFrame *frame = node->frame;
594  const int16_t *p = (const int16_t *)frame->data[0];
595 
596  for (i = 0; i < frame->nb_samples; i++) {
597  int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
598  int ch;
599 
600  switch (showwaves->filter_mode) {
601  case FILTER_AVERAGE:
602  for (ch = 0; ch < nb_channels; ch++)
603  sum[ch] += abs(p[ch + i*nb_channels]) << 1;
604  break;
605  case FILTER_PEAK:
606  for (ch = 0; ch < nb_channels; ch++)
607  sum[ch] = FFMAX(sum[ch], abs(p[ch + i*nb_channels]));
608  break;
609  }
610 
611  n++;
612  if (n == max_samples) {
613  for (ch = 0; ch < nb_channels; ch++) {
614  int16_t sample = sum[ch] / (showwaves->filter_mode == FILTER_AVERAGE ? max_samples : 1);
615  uint8_t *buf = out->data[0] + col * pixstep;
616  int h;
617 
618  if (showwaves->split_channels)
619  buf += ch*ch_height*linesize;
620  av_assert0(col < outlink->w);
621  h = showwaves->get_h(sample, ch_height);
622  showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
623  sum[ch] = 0;
624  }
625  col++;
626  n = 0;
627  }
628  }
629  }
630 
631  return push_frame(outlink);
632 }
633 
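push_single_pic() above reduces the whole buffered audio stream to one value per output column: total_samples / w samples feed each column, the division remainder is folded into the last column, and each bucket collapses to either its mean or its peak absolute value depending on the "filter" option. A simplified mono-only sketch of that reduction; the function and variable names are illustrative and the pre-scaling details of the real loop are omitted:

#include <stdint.h>
#include <stdlib.h>

enum { REDUCE_AVERAGE, REDUCE_PEAK };

/* Collapse nb_samples mono samples into w column amplitudes.
 * Assumes nb_samples >= w, mirroring the "Too few samples" check above. */
static void reduce_columns(const int16_t *p, int64_t nb_samples, int w,
                           int mode, int16_t *col_out)
{
    int64_t per_col = nb_samples / w;             /* samples per column         */
    int64_t last    = per_col + nb_samples % w;   /* last column takes the rest */
    int64_t idx     = 0;

    for (int col = 0; col < w; col++) {
        int64_t want = (col == w - 1) ? last : per_col;
        int64_t acc  = 0;

        for (int64_t k = 0; k < want; k++, idx++) {
            int v = abs(p[idx]);
            acc = (mode == REDUCE_AVERAGE) ? acc + v : (acc > v ? acc : v);
        }
        col_out[col] = (int16_t)(mode == REDUCE_AVERAGE ? acc / want : acc);
    }
}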
634 
635 static int request_frame(AVFilterLink *outlink)
636 {
637  ShowWavesContext *showwaves = outlink->src->priv;
638  AVFilterLink *inlink = outlink->src->inputs[0];
639  int ret;
640 
641  ret = ff_request_frame(inlink);
642  if (ret == AVERROR_EOF && showwaves->outpicref) {
643  if (showwaves->single_pic)
644  push_single_pic(outlink);
645  else
646  push_frame(outlink);
647  }
648 
649  return ret;
650 }
651 
652 static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
653  const AVFilterLink *inlink, AVFilterLink *outlink,
654  const AVFrame *in)
655 {
656  if (!showwaves->outpicref) {
657  int j;
658  AVFrame *out = showwaves->outpicref =
659  ff_get_video_buffer(outlink, outlink->w, outlink->h);
660  if (!out)
661  return AVERROR(ENOMEM);
662  out->width = outlink->w;
663  out->height = outlink->h;
664  out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
665  av_make_q(1, inlink->sample_rate),
666  outlink->time_base);
667  for (j = 0; j < outlink->h; j++)
668  memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
669  }
670  return 0;
671 }
672 
673 static av_cold int init(AVFilterContext *ctx)
674 {
675  ShowWavesContext *showwaves = ctx->priv;
676 
677  if (!strcmp(ctx->filter->name, "showwavespic")) {
678  showwaves->single_pic = 1;
679  showwaves->mode = MODE_CENTERED_LINE;
680  }
681 
682  return 0;
683 }
684 
685 #if CONFIG_SHOWWAVES_FILTER
686 
687 static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
688 {
689  AVFilterContext *ctx = inlink->dst;
690  AVFilterLink *outlink = ctx->outputs[0];
691  ShowWavesContext *showwaves = ctx->priv;
692  const int nb_samples = insamples->nb_samples;
693  AVFrame *outpicref = showwaves->outpicref;
694  int16_t *p = (int16_t *)insamples->data[0];
695  int nb_channels = inlink->channels;
696  int i, j, ret = 0;
697  const int pixstep = showwaves->pixstep;
698  const int n = showwaves->n;
699  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
700 
701  /* draw data in the buffer */
702  for (i = 0; i < nb_samples; i++) {
703 
704  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
705  if (ret < 0)
706  goto end;
707  outpicref = showwaves->outpicref;
708 
709  for (j = 0; j < nb_channels; j++) {
710  uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
711  const int linesize = outpicref->linesize[0];
712  int h;
713 
714  if (showwaves->split_channels)
715  buf += j*ch_height*linesize;
716  h = showwaves->get_h(*p++, ch_height);
717  showwaves->draw_sample(buf, ch_height, linesize,
718  &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
719  }
720 
721  showwaves->sample_count_mod++;
722  if (showwaves->sample_count_mod == n) {
723  showwaves->sample_count_mod = 0;
724  showwaves->buf_idx++;
725  }
726  if (showwaves->buf_idx == showwaves->w ||
727  (ff_outlink_get_status(inlink) && i == nb_samples - 1))
728  if ((ret = push_frame(outlink)) < 0)
729  break;
730  outpicref = showwaves->outpicref;
731  }
732 
733 end:
734  av_frame_free(&insamples);
735  return ret;
736 }
737 
738 static int activate(AVFilterContext *ctx)
739 {
740  AVFilterLink *inlink = ctx->inputs[0];
741  AVFilterLink *outlink = ctx->outputs[0];
742  ShowWavesContext *showwaves = ctx->priv;
743  AVFrame *in;
744  const int nb_samples = showwaves->n * outlink->w;
745  int ret;
746 
747  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
748 
749  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
750  if (ret < 0)
751  return ret;
752  if (ret > 0)
753  return showwaves_filter_frame(inlink, in);
754 
755  FF_FILTER_FORWARD_STATUS(inlink, outlink);
756  FF_FILTER_FORWARD_WANTED(outlink, inlink);
757 
758  return FFERROR_NOT_READY;
759 }
760 
761 static const AVFilterPad showwaves_inputs[] = {
762  {
763  .name = "default",
764  .type = AVMEDIA_TYPE_AUDIO,
765  },
766  { NULL }
767 };
768 
769 static const AVFilterPad showwaves_outputs[] = {
770  {
771  .name = "default",
772  .type = AVMEDIA_TYPE_VIDEO,
773  .config_props = config_output,
774  },
775  { NULL }
776 };
777 
778 AVFilter ff_avf_showwaves = {
779  .name = "showwaves",
780  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
781  .init = init,
782  .uninit = uninit,
783  .query_formats = query_formats,
784  .priv_size = sizeof(ShowWavesContext),
785  .inputs = showwaves_inputs,
786  .activate = activate,
787  .outputs = showwaves_outputs,
788  .priv_class = &showwaves_class,
789 };
790 
791 #endif // CONFIG_SHOWWAVES_FILTER
792 
793 #if CONFIG_SHOWWAVESPIC_FILTER
794 
795 #define OFFSET(x) offsetof(ShowWavesContext, x)
796 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
797 
798 static const AVOption showwavespic_options[] = {
799  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
800  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
801  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
802  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
803  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
804  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
805  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
806  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
807  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
808  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
809  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
810  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
811  { "filter", "set filter mode", OFFSET(filter_mode), AV_OPT_TYPE_INT, {.i64 = FILTER_AVERAGE}, 0, FILTER_NB-1, FLAGS, .unit="filter" },
812  { "average", "use average samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_AVERAGE}, .flags=FLAGS, .unit="filter"},
813  { "peak", "use peak samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_PEAK}, .flags=FLAGS, .unit="filter"},
814  { NULL }
815 };
816 
817 AVFILTER_DEFINE_CLASS(showwavespic);
818 
819 static int showwavespic_config_input(AVFilterLink *inlink)
820 {
821  AVFilterContext *ctx = inlink->dst;
822  ShowWavesContext *showwaves = ctx->priv;
823 
824  if (showwaves->single_pic) {
825  showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
826  if (!showwaves->sum)
827  return AVERROR(ENOMEM);
828  }
829 
830  return 0;
831 }
832 
833 static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
834 {
835  AVFilterContext *ctx = inlink->dst;
836  AVFilterLink *outlink = ctx->outputs[0];
837  ShowWavesContext *showwaves = ctx->priv;
838  int16_t *p = (int16_t *)insamples->data[0];
839  int ret = 0;
840 
841  if (showwaves->single_pic) {
842  struct frame_node *f;
843 
844  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
845  if (ret < 0)
846  goto end;
847 
848  /* queue the audio frame */
849  f = av_malloc(sizeof(*f));
850  if (!f) {
851  ret = AVERROR(ENOMEM);
852  goto end;
853  }
854  f->frame = insamples;
855  f->next = NULL;
856  if (!showwaves->last_frame) {
857  showwaves->audio_frames =
858  showwaves->last_frame = f;
859  } else {
860  showwaves->last_frame->next = f;
861  showwaves->last_frame = f;
862  }
863  showwaves->total_samples += insamples->nb_samples;
864 
865  return 0;
866  }
867 
868 end:
869  av_frame_free(&insamples);
870  return ret;
871 }
872 
873 static const AVFilterPad showwavespic_inputs[] = {
874  {
875  .name = "default",
876  .type = AVMEDIA_TYPE_AUDIO,
877  .config_props = showwavespic_config_input,
878  .filter_frame = showwavespic_filter_frame,
879  },
880  { NULL }
881 };
882 
883 static const AVFilterPad showwavespic_outputs[] = {
884  {
885  .name = "default",
886  .type = AVMEDIA_TYPE_VIDEO,
887  .config_props = config_output,
888  .request_frame = request_frame,
889  },
890  { NULL }
891 };
892 
893 AVFilter ff_avf_showwavespic = {
894  .name = "showwavespic",
895  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
896  .init = init,
897  .uninit = uninit,
898  .query_formats = query_formats,
899  .priv_size = sizeof(ShowWavesContext),
900  .inputs = showwavespic_inputs,
901  .outputs = showwavespic_outputs,
902  .priv_class = &showwavespic_class,
903 };
904 
905 #endif // CONFIG_SHOWWAVESPIC_FILTER
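Both filters are usually reached through a filtergraph string (ffmpeg's -filter_complex, or the avfilter_graph_parse* API), with the options from the tables above encoded as key=value pairs. A minimal sketch; the option values and the surrounding graph wiring are assumed, not taken from this file:

#include <libavfilter/avfilter.h>

/* Illustrative: parse a showwavespic chain into an existing graph. The caller
 * still has to connect the returned unlinked audio input and video output. */
static int parse_waves_chain(AVFilterGraph *graph,
                             AVFilterInOut **inputs, AVFilterInOut **outputs)
{
    return avfilter_graph_parse2(graph,
            "showwavespic=s=1024x200:split_channels=1:colors=white|gray",
            inputs, outputs);
}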