vf_vectorscope.c
1 /*
2  * Copyright (c) 2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/avassert.h"
22 #include "libavutil/opt.h"
23 #include "libavutil/parseutils.h"
24 #include "libavutil/pixdesc.h"
25 #include "avfilter.h"
26 #include "formats.h"
27 #include "internal.h"
28 #include "video.h"
29 
30 enum VectorscopeMode {
31  GRAY,
32  COLOR,
33  COLOR2,
34  COLOR3,
35  COLOR4,
36  MODE_NB
37 };
38 
39 typedef struct VectorscopeContext {
40  const AVClass *class;
41  int mode;
42  int intensity;
43  float fintensity;
44  const uint8_t *bg_color;
45  int planewidth[4];
46  int planeheight[4];
47  int hsub, vsub;
48  int x, y, pd;
49  int is_yuv;
50  int envelope;
51  uint8_t peak[256][256];
52 } VectorscopeContext;
53 
54 #define OFFSET(x) offsetof(VectorscopeContext, x)
55 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
56 
57 static const AVOption vectorscope_options[] = {
58  { "mode", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"},
59  { "m", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"},
60  { "gray", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAY}, 0, 0, FLAGS, "mode" },
61  { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR}, 0, 0, FLAGS, "mode" },
62  { "color2", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR2}, 0, 0, FLAGS, "mode" },
63  { "color3", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR3}, 0, 0, FLAGS, "mode" },
64  { "color4", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR4}, 0, 0, FLAGS, "mode" },
65  { "x", "set color component on X axis", OFFSET(x), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS},
66  { "y", "set color component on Y axis", OFFSET(y), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS},
67  { "intensity", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.004}, 0, 1, FLAGS},
68  { "i", "set intensity", OFFSET(fintensity), AV_OPT_TYPE_FLOAT, {.dbl=0.004}, 0, 1, FLAGS},
69  { "envelope", "set envelope", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope"},
70  { "e", "set envelope", OFFSET(envelope), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS, "envelope"},
71  { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "envelope" },
72  { "instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "envelope" },
73  { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "envelope" },
74  { "peak+instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "envelope" },
75  { NULL }
76 };
77 
78 AVFILTER_DEFINE_CLASS(vectorscope);
79 
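The option table above exposes mode (m), x, y, intensity (i) and envelope (e). Below is a minimal sketch of passing those options when creating the filter programmatically; it assumes the libavfilter API of this era (avfilter_register_all() and friends), the instance name "scope" is arbitrary, and the buffer/buffersink wiring needed to actually feed frames is omitted.

#include <libavfilter/avfilter.h>
#include <stdio.h>

int main(void)
{
    AVFilterGraph *graph;
    AVFilterContext *vscope = NULL;
    int ret;

    avfilter_register_all();              /* needed with this era of libavfilter */
    graph = avfilter_graph_alloc();
    if (!graph)
        return 1;

    /* Option names and values exactly as declared in vectorscope_options. */
    ret = avfilter_graph_create_filter(&vscope, avfilter_get_by_name("vectorscope"),
                                       "scope", "mode=color2:x=1:y=2:i=0.1:e=peak",
                                       NULL, graph);
    if (ret < 0)
        fprintf(stderr, "could not create the vectorscope instance\n");

    avfilter_graph_free(&graph);
    return ret < 0;
}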
80 static const enum AVPixelFormat out_yuv_pix_fmts[] = {
83 };
84 
85 static const enum AVPixelFormat out_rgb_pix_fmts[] = {
88 };
89 
90 static const enum AVPixelFormat in1_pix_fmts[] = {
94 };
95 
96 static const enum AVPixelFormat in2_pix_fmts[] = {
104 };
105 
106 static int query_formats(AVFilterContext *ctx)
107 {
108  VectorscopeContext *s = ctx->priv;
109  const enum AVPixelFormat *out_pix_fmts;
110  const AVPixFmtDescriptor *desc;
111  AVFilterFormats *avff;
112  int rgb, i;
113 
114  if (!ctx->inputs[0]->in_formats ||
115  !ctx->inputs[0]->in_formats->nb_formats) {
116  return AVERROR(EAGAIN);
117  }
118 
119  if (!ctx->inputs[0]->out_formats) {
120  const enum AVPixelFormat *in_pix_fmts;
121 
122  if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
123  in_pix_fmts = in2_pix_fmts;
124  else
125  in_pix_fmts = in1_pix_fmts;
126  ff_formats_ref(ff_make_format_list(in_pix_fmts), &ctx->inputs[0]->out_formats);
127  }
128 
129  avff = ctx->inputs[0]->in_formats;
130  desc = av_pix_fmt_desc_get(avff->formats[0]);
131  rgb = desc->flags & AV_PIX_FMT_FLAG_RGB;
132  for (i = 1; i < avff->nb_formats; i++) {
133  desc = av_pix_fmt_desc_get(avff->formats[i]);
134  if (rgb != (desc->flags & AV_PIX_FMT_FLAG_RGB))
135  return AVERROR(EAGAIN);
136  }
137 
138  if (rgb)
139  out_pix_fmts = out_rgb_pix_fmts;
140  else
141  out_pix_fmts = out_yuv_pix_fmts;
142  ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats);
143 
144  return 0;
145 }
146 
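query_formats() only proceeds once every pixel format offered on the input agrees on the AV_PIX_FMT_FLAG_RGB flag, and that flag then selects either the RGB or the YUV output list. A small self-contained sketch of the same flag test (the two formats here are just examples):

#include <libavutil/pixdesc.h>
#include <stdio.h>

int main(void)
{
    enum AVPixelFormat fmts[2] = { AV_PIX_FMT_GBRP, AV_PIX_FMT_YUV444P };
    int i;

    for (i = 0; i < 2; i++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmts[i]);
        printf("%s: %s\n", desc->name,
               (desc->flags & AV_PIX_FMT_FLAG_RGB) ? "RGB-like" : "YUV/grayscale");
    }
    return 0;
}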
147 static const uint8_t black_yuva_color[4] = { 0, 127, 127, 0 };
148 static const uint8_t black_gbrp_color[4] = { 0, 0, 0, 0 };
149 
150 static int config_input(AVFilterLink *inlink)
151 {
152  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
153  VectorscopeContext *s = inlink->dst->priv;
154 
155  s->is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB);
156 
157  if (s->mode == GRAY && s->is_yuv)
158  s->pd = 0;
159  else {
160  if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
161  s->pd = 0;
162  else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0))
163  s->pd = 1;
164  else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0))
165  s->pd = 2;
166  }
167 
168  switch (inlink->format) {
169  case AV_PIX_FMT_GBRAP:
170  case AV_PIX_FMT_GBRP:
171  s->bg_color = black_gbrp_color;
172  break;
173  default:
174  s->bg_color = black_yuva_color;
175  }
176 
177  s->hsub = desc->log2_chroma_w;
178  s->vsub = desc->log2_chroma_h;
179  s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
180  s->planeheight[0] = s->planeheight[3] = inlink->h;
181  s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
182  s->planewidth[0] = s->planewidth[3] = inlink->w;
183 
184  return 0;
185 }
186 
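config_input() selects the plane that receives the plot (pd) from the x/y choice and derives the per-plane sizes with FF_CEIL_RSHIFT, a right shift that rounds up. A worked sketch with a locally defined CEIL_RSHIFT macro (same rounding behaviour, name chosen here) and a hypothetical odd-sized 4:2:0 frame to make the rounding visible:

#include <stdio.h>

/* Mirrors the rounding-up behaviour of FF_CEIL_RSHIFT from libavutil/common.h. */
#define CEIL_RSHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))

int main(void)
{
    int w = 1921, h = 1081;              /* hypothetical odd-sized 4:2:0 frame */
    int log2_chroma_w = 1, log2_chroma_h = 1;

    printf("luma   plane: %d x %d\n", w, h);
    printf("chroma plane: %d x %d\n",
           CEIL_RSHIFT(w, log2_chroma_w),    /* (1921 + 1) >> 1 = 961 */
           CEIL_RSHIFT(h, log2_chroma_h));   /* (1081 + 1) >> 1 = 541 */
    return 0;
}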
187 static int config_output(AVFilterLink *outlink)
188 {
189  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
190  const int depth = desc->comp[0].depth_minus1 + 1;
191  VectorscopeContext *s = outlink->src->priv;
192 
193  s->intensity = s->fintensity * ((1 << depth) - 1);
194  outlink->h = outlink->w = 1 << depth;
195  outlink->sample_aspect_ratio = (AVRational){1,1};
196  return 0;
197 }
198 
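config_output() makes the output a square of side 1 << depth, so there is one cell for every possible pair of component values, and converts the float intensity option into the integer increment added per hit. For the common 8-bit case that gives a 256x256 frame and, with the default intensity of 0.004, an increment of (int)(0.004 * 255) = 1. A tiny sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
    int depth = 8;                                     /* 8-bit components */
    float fintensity = 0.004f;                         /* default "intensity" option */

    int side = 1 << depth;                             /* 256 x 256 output */
    int intensity = fintensity * ((1 << depth) - 1);   /* 0.004 * 255 = 1.02 -> 1 */

    printf("output %dx%d, per-hit increment %d\n", side, side, intensity);
    return 0;
}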
199 static void envelope_instant(VectorscopeContext *s, AVFrame *out)
200 {
201  const int dlinesize = out->linesize[0];
202  uint8_t *dpd = s->mode == COLOR || !s->is_yuv ? out->data[s->pd] : out->data[0];
203  int i, j;
204 
205  for (i = 0; i < out->height; i++) {
206  for (j = 0; j < out->width; j++) {
207  const int pos = i * dlinesize + j;
208  const int poa = (i - 1) * dlinesize + j;
209  const int pob = (i + 1) * dlinesize + j;
210 
211  if (dpd[pos] && (((!j || !dpd[pos - 1]) || ((j == (out->width - 1)) || !dpd[pos + 1]))
212  || ((!i || !dpd[poa]) || ((i == (out->height - 1)) || !dpd[pob])))) {
213  dpd[pos] = 255;
214  }
215  }
216  }
217 }
218 
219 static void envelope_peak(VectorscopeContext *s, AVFrame *out)
220 {
221  const int dlinesize = out->linesize[0];
222  uint8_t *dpd = s->mode == COLOR || !s->is_yuv ? out->data[s->pd] : out->data[0];
223  int i, j;
224 
225  for (i = 0; i < out->height; i++) {
226  for (j = 0; j < out->width; j++) {
227  const int pos = i * dlinesize + j;
228 
229  if (dpd[pos])
230  s->peak[i][j] = 255;
231  }
232  }
233 
234  if (s->envelope == 3)
235  envelope_instant(s, out);
236 
237  for (i = 0; i < out->height; i++) {
238  for (j = 0; j < out->width; j++) {
239  const int pos = i * dlinesize + j;
240 
241  if (s->peak[i][j] && (((!j || !s->peak[i][j-1]) || ((j == (out->width - 1)) || !s->peak[i][j + 1]))
242  || ((!i || !s->peak[i-1][j]) || ((i == (out->height - 1)) || !s->peak[i + 1][j])))) {
243  dpd[pos] = 255;
244  }
245  }
246  }
247 }
248 
249 static void envelope(VectorscopeContext *s, AVFrame *out)
250 {
251  if (!s->envelope) {
252  return;
253  } else if (s->envelope == 1) {
254  envelope_instant(s, out);
255  } else {
256  envelope_peak(s, out);
257  }
258 }
259 
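Both envelope passes rely on the same test: a lit cell is pushed to 255 when it lies on the frame border or has at least one unlit 4-neighbour, which traces the outline of the plotted region; "peak" additionally records every cell that was ever lit in s->peak, so the outline only grows over time. A standalone restatement of the instant test on a tiny made-up grid:

#include <stdio.h>
#include <stdint.h>

#define W 8
#define H 6

int main(void)
{
    /* Hypothetical plot: 1 = lit cell, 0 = empty. */
    uint8_t g[H][W] = {
        {0,0,0,0,0,0,0,0},
        {0,0,1,1,1,0,0,0},
        {0,1,1,1,1,1,0,0},
        {0,1,1,1,1,1,0,0},
        {0,0,1,1,1,0,0,0},
        {0,0,0,0,0,0,0,0},
    };
    int i, j;

    for (i = 0; i < H; i++) {
        for (j = 0; j < W; j++) {
            /* Same condition as envelope_instant(): lit and not fully surrounded. */
            int horiz = j > 0 && g[i][j - 1] && j < W - 1 && g[i][j + 1];
            int vert  = i > 0 && g[i - 1][j] && i < H - 1 && g[i + 1][j];
            int edge  = g[i][j] && !(horiz && vert);

            putchar(!g[i][j] ? '.' : edge ? '#' : '+');
        }
        putchar('\n');
    }
    return 0;
}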
260 static void vectorscope(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
261 {
262  const uint8_t * const *src = (const uint8_t * const *)in->data;
263  const int slinesizex = in->linesize[s->x];
264  const int slinesizey = in->linesize[s->y];
265  const int slinesized = in->linesize[pd];
266  const int dlinesize = out->linesize[0];
267  const int intensity = s->intensity;
268  const int px = s->x, py = s->y;
269  const int h = s->planeheight[py];
270  const int w = s->planewidth[px];
271  const uint8_t *spx = src[px];
272  const uint8_t *spy = src[py];
273  const uint8_t *spd = src[pd];
274  const int hsub = s->hsub;
275  const int vsub = s->vsub;
276  uint8_t **dst = out->data;
277  uint8_t *dpx = dst[px];
278  uint8_t *dpy = dst[py];
279  uint8_t *dpd = dst[pd];
280  int i, j;
281 
282  switch (s->mode) {
283  case COLOR:
284  case GRAY:
285  if (s->is_yuv) {
286  for (i = 0; i < h; i++) {
287  const int iwx = i * slinesizex;
288  const int iwy = i * slinesizey;
289  for (j = 0; j < w; j++) {
290  const int x = spx[iwx + j];
291  const int y = spy[iwy + j];
292  const int pos = y * dlinesize + x;
293 
294  dpd[pos] = FFMIN(dpd[pos] + intensity, 255);
295  if (dst[3])
296  dst[3][pos] = 255;
297  }
298  }
299  } else {
300  for (i = 0; i < h; i++) {
301  const int iwx = i * slinesizex;
302  const int iwy = i * slinesizey;
303  for (j = 0; j < w; j++) {
304  const int x = spx[iwx + j];
305  const int y = spy[iwy + j];
306  const int pos = y * dlinesize + x;
307 
308  dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);
309  dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);
310  dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);
311  if (dst[3])
312  dst[3][pos] = 255;
313  }
314  }
315  }
316  break;
317  case COLOR2:
318  if (s->is_yuv) {
319  for (i = 0; i < h; i++) {
320  const int iw1 = i * slinesizex;
321  const int iw2 = i * slinesizey;
322  for (j = 0; j < w; j++) {
323  const int x = spx[iw1 + j];
324  const int y = spy[iw2 + j];
325  const int pos = y * dlinesize + x;
326 
327  if (!dpd[pos])
328  dpd[pos] = FFABS(128 - x) + FFABS(128 - y);
329  dpx[pos] = x;
330  dpy[pos] = y;
331  if (dst[3])
332  dst[3][pos] = 255;
333  }
334  }
335  } else {
336  for (i = 0; i < h; i++) {
337  const int iw1 = i * slinesizex;
338  const int iw2 = i * slinesizey;
339  for (j = 0; j < w; j++) {
340  const int x = spx[iw1 + j];
341  const int y = spy[iw2 + j];
342  const int pos = y * dlinesize + x;
343 
344  if (!dpd[pos])
345  dpd[pos] = FFMIN(x + y, 255);
346  dpx[pos] = x;
347  dpy[pos] = y;
348  if (dst[3])
349  dst[3][pos] = 255;
350  }
351  }
352  }
353  break;
354  case COLOR3:
355  for (i = 0; i < h; i++) {
356  const int iw1 = i * slinesizex;
357  const int iw2 = i * slinesizey;
358  for (j = 0; j < w; j++) {
359  const int x = spx[iw1 + j];
360  const int y = spy[iw2 + j];
361  const int pos = y * dlinesize + x;
362 
363  dpd[pos] = FFMIN(255, dpd[pos] + intensity);
364  dpx[pos] = x;
365  dpy[pos] = y;
366  if (dst[3])
367  dst[3][pos] = 255;
368  }
369  }
370  break;
371  case COLOR4:
372  for (i = 0; i < in->height; i++) {
373  const int iwx = (i >> vsub) * slinesizex;
374  const int iwy = (i >> vsub) * slinesizey;
375  const int iwd = i * slinesized;
376  for (j = 0; j < in->width; j++) {
377  const int x = spx[iwx + (j >> hsub)];
378  const int y = spy[iwy + (j >> hsub)];
379  const int pos = y * dlinesize + x;
380 
381  dpd[pos] = FFMAX(spd[iwd + j], dpd[pos]);
382  dpx[pos] = x;
383  dpy[pos] = y;
384  if (dst[3])
385  dst[3][pos] = 255;
386  }
387  }
388  break;
389  default:
390  av_assert0(0);
391  }
392 
393  envelope(s, out);
394 
395  if (s->mode == COLOR) {
396  for (i = 0; i < out->height; i++) {
397  for (j = 0; j < out->width; j++) {
398  if (!dpd[i * out->linesize[pd] + j]) {
399  dpx[i * out->linesize[px] + j] = j;
400  dpy[i * out->linesize[py] + j] = i;
401  dpd[i * out->linesize[pd] + j] = 128;
402  }
403  }
404  }
405  }
406 }
407 
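In GRAY/COLOR mode the loop above treats the two selected components of each input pixel as coordinates and bumps the matching output cell by the configured intensity, so bright spots in the plot are chroma pairs that occur often. A minimal sketch of that accumulation with made-up samples:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    static uint8_t plot[256][256];                /* one 8-bit output plane */
    /* Hypothetical U/V values taken from four input pixels. */
    uint8_t u[4] = { 100, 100, 128, 200 };
    uint8_t v[4] = {  90,  90, 128,  60 };
    int intensity = 1;                            /* see config_output() */
    int i, sum;

    for (i = 0; i < 4; i++) {
        sum = plot[v[i]][u[i]] + intensity;
        plot[v[i]][u[i]] = sum > 255 ? 255 : sum; /* FFMIN(..., 255) */
    }

    printf("cell (u=100, v=90) was hit %d times\n", plot[90][100]);  /* prints 2 */
    return 0;
}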
408 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
409 {
410  AVFilterContext *ctx = inlink->dst;
411  VectorscopeContext *s = ctx->priv;
412  AVFilterLink *outlink = ctx->outputs[0];
413  AVFrame *out;
414  uint8_t **dst;
415  int i, k;
416 
417  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
418  if (!out) {
419  av_frame_free(&in);
420  return AVERROR(ENOMEM);
421  }
422  out->pts = in->pts;
423  dst = out->data;
424 
425  for (k = 0; k < 4 && dst[k]; k++)
426  for (i = 0; i < outlink->h ; i++)
427  memset(dst[k] + i * out->linesize[k],
428  s->mode == COLOR && k == s->pd ? 0 : s->bg_color[k], outlink->w);
429 
430  vectorscope(s, in, out, s->pd);
431 
432  av_frame_free(&in);
433  return ff_filter_frame(outlink, out);
434 }
435 
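filter_frame() clears the output plane by plane and row by row because the buffer's linesize can be wider than the visible width; the fill value is the background colour, except for the accumulation plane in COLOR mode, which starts at zero. A toy illustration of the row-wise clear (all sizes invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    enum { W = 4, H = 3, LINESIZE = 8 };       /* linesize > width, as with padded frames */
    uint8_t plane[H * LINESIZE];
    uint8_t bg = 127;                          /* e.g. neutral chroma from black_yuva_color */
    int i;

    memset(plane, 0xAA, sizeof(plane));        /* stand-in for stale buffer contents */
    for (i = 0; i < H; i++)
        memset(plane + i * LINESIZE, bg, W);   /* clear only the visible part of each row */

    printf("row 0 starts with %d, padding byte after it is still %d\n",
           plane[0], plane[W]);
    return 0;
}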
436 static const AVFilterPad inputs[] = {
437  {
438  .name = "default",
439  .type = AVMEDIA_TYPE_VIDEO,
440  .filter_frame = filter_frame,
441  .config_props = config_input,
442  },
443  { NULL }
444 };
445 
446 static const AVFilterPad outputs[] = {
447  {
448  .name = "default",
449  .type = AVMEDIA_TYPE_VIDEO,
450  .config_props = config_output,
451  },
452  { NULL }
453 };
454 
455 AVFilter ff_vf_vectorscope = {
456  .name = "vectorscope",
457  .description = NULL_IF_CONFIG_SMALL("Video vectorscope."),
458  .priv_size = sizeof(VectorscopeContext),
459  .priv_class = &vectorscope_class,
460  .query_formats = query_formats,
461  .inputs = inputs,
462  .outputs = outputs,
463 };