/*
 * Copyright (c) 2012 Fredrik Mellbin
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"

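/*
 * Input pad indices: the "main" video input always exists; a second
 * "clean_src" input is created in decimate_init() when the ppsrc option is
 * set, and both streams are then buffered cycle by cycle in filter_frame().
 */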
#define INPUT_MAIN     0
#define INPUT_CLEANSRC 1

struct qitem {
    AVFrame *frame;
    int64_t maxbdiff;
    int64_t totdiff;
};

typedef struct DecimateContext {
    const AVClass *class;
    struct qitem *queue;    ///< window of cycle frames and the associated data diff
    int fid;                ///< current frame id in the queue
    int filled;             ///< 1 if the queue is filled, 0 otherwise
    AVFrame *last;          ///< last frame from the previous queue
    AVFrame **clean_src;    ///< frame queue for the clean source
    int got_frame[2];       ///< frame request flag for each input stream
    AVRational ts_unit;     ///< timestamp units for the output frames
    int64_t start_pts;      ///< base for output timestamps
    uint32_t eof;           ///< bitmask for end of stream
    int hsub, vsub;         ///< chroma subsampling values
    int depth;
    int nxblocks, nyblocks;
    int bdiffsize;
    int64_t *bdiffs;

    /* options */
    int cycle;
    double dupthresh_flt;
    double scthresh_flt;
    int64_t dupthresh;
    int64_t scthresh;
    int blockx, blocky;
    int ppsrc;
    int chroma;
} DecimateContext;

#define OFFSET(x) offsetof(DecimateContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption decimate_options[] = {
    { "cycle",     "set the number of frames from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
    { "dupthresh", "set duplicate threshold",    OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl =  1.1}, 0, 100, FLAGS },
    { "scthresh",  "set scene change threshold", OFFSET(scthresh_flt),  AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
    { "blockx",    "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
    { "blocky",    "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
    { "ppsrc",     "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "chroma",    "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(decimate);

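/*
 * Compute the difference metrics between two frames.  Sums of absolute pixel
 * differences are accumulated on a grid of half-block cells (blockx/2 x
 * blocky/2 pixels each); q->maxbdiff then holds the largest sum over any 2x2
 * group of neighboring cells (i.e. a full block, stepped by half a block),
 * and q->totdiff the sum over the whole frame (luma, plus chroma if enabled).
 */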
static void calc_diffs(const DecimateContext *dm, struct qitem *q,
                       const AVFrame *f1, const AVFrame *f2)
{
    int64_t maxdiff = -1;
    int64_t *bdiffs = dm->bdiffs;
    int plane, i, j;

    memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));

    for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
        int x, y, xl;
        const int linesize1 = f1->linesize[plane];
        const int linesize2 = f2->linesize[plane];
        const uint8_t *f1p = f1->data[plane];
        const uint8_t *f2p = f2->data[plane];
        int width   = plane ? AV_CEIL_RSHIFT(f1->width,  dm->hsub) : f1->width;
        int height  = plane ? AV_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
        int hblockx = dm->blockx / 2;
        int hblocky = dm->blocky / 2;

        if (plane) {
            hblockx >>= dm->hsub;
            hblocky >>= dm->vsub;
        }

        for (y = 0; y < height; y++) {
            int ydest = y / hblocky;
            int xdest = 0;

#define CALC_DIFF(nbits) do {                                       \
    for (x = 0; x < width; x += hblockx) {                          \
        int64_t acc = 0;                                            \
        int m = FFMIN(width, x + hblockx);                          \
        for (xl = x; xl < m; xl++)                                  \
            acc += abs(((const uint##nbits##_t *)f1p)[xl] -         \
                       ((const uint##nbits##_t *)f2p)[xl]);         \
        bdiffs[ydest * dm->nxblocks + xdest] += acc;                \
        xdest++;                                                    \
    }                                                               \
} while (0)
            if (dm->depth == 8) CALC_DIFF(8);
            else                CALC_DIFF(16);

            f1p += linesize1;
            f2p += linesize2;
        }
    }

    for (i = 0; i < dm->nyblocks - 1; i++) {
        for (j = 0; j < dm->nxblocks - 1; j++) {
            int64_t tmp = bdiffs[ i      * dm->nxblocks + j    ]
                        + bdiffs[ i      * dm->nxblocks + j + 1]
                        + bdiffs[(i + 1) * dm->nxblocks + j    ]
                        + bdiffs[(i + 1) * dm->nxblocks + j + 1];
            if (tmp > maxdiff)
                maxdiff = tmp;
        }
    }

    q->totdiff = 0;
    for (i = 0; i < dm->bdiffsize; i++)
        q->totdiff += bdiffs[i];
    q->maxbdiff = maxdiff;
}

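/*
 * Per-frame callback shared by both inputs.  Frames are buffered until a full
 * cycle of `cycle` frames is available; the frame with the lowest maxbdiff (a
 * likely duplicate) is then dropped, unless a scene change was detected and
 * no duplicate was found, in which case the scene-change frame is dropped
 * instead.  The remaining frames are pushed with timestamps rebuilt from
 * ts_unit and start_pts.
 */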
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int scpos = -1, duppos = -1;
    int drop = INT_MIN, i, lowest = 0, ret;
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DecimateContext *dm   = ctx->priv;
    AVFrame *prv;

    /* update frames queue(s) */
    if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
        dm->queue[dm->fid].frame = in;
        dm->got_frame[INPUT_MAIN] = 1;
    } else {
        dm->clean_src[dm->fid] = in;
        dm->got_frame[INPUT_CLEANSRC] = 1;
    }
    if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
        return 0;
    dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;

    if (dm->ppsrc)
        in = dm->clean_src[dm->fid];

    if (in) {
        /* update frame metrics */
        prv = dm->fid ? (dm->ppsrc ? dm->clean_src[dm->fid - 1] : dm->queue[dm->fid - 1].frame) : dm->last;
        if (!prv) {
            dm->queue[dm->fid].maxbdiff = INT64_MAX;
            dm->queue[dm->fid].totdiff  = INT64_MAX;
        } else {
            calc_diffs(dm, &dm->queue[dm->fid], prv, in);
        }
        if (++dm->fid != dm->cycle)
            return 0;
        av_frame_free(&dm->last);
        dm->last = av_frame_clone(in);
        dm->fid = 0;

        /* we have a complete cycle, select the frame to drop */
        lowest = 0;
        for (i = 0; i < dm->cycle; i++) {
            if (dm->queue[i].totdiff > dm->scthresh)
                scpos = i;
            if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
                lowest = i;
        }
        if (dm->queue[lowest].maxbdiff < dm->dupthresh)
            duppos = lowest;
        drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
    }

    /* metrics debug */
    if (av_log_get_level() >= AV_LOG_DEBUG) {
        av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
        for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
            av_log(ctx, AV_LOG_DEBUG,"  #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
                   i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
                   i == scpos  ? " sc"     : "",
                   i == duppos ? " dup"    : "",
                   i == lowest ? " lowest" : "",
                   i == drop   ? " [DROP]" : "");
        }
    }

    /* push all frames except the drop */
    ret = 0;
    for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
        if (i == drop) {
            if (dm->ppsrc)
                av_frame_free(&dm->clean_src[i]);
            av_frame_free(&dm->queue[i].frame);
        } else {
            AVFrame *frame = dm->queue[i].frame;
            dm->queue[i].frame = NULL;
            if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
                dm->start_pts = frame->pts;
            if (dm->ppsrc) {
                av_frame_free(&frame);
                frame = dm->clean_src[i];
                dm->clean_src[i] = NULL;
            }
            frame->pts = av_rescale_q(outlink->frame_count_in, dm->ts_unit, (AVRational){1,1}) +
                         (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
            ret = ff_filter_frame(outlink, frame);
            if (ret < 0)
                break;
        }
    }

    return ret;
}

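/*
 * Configure the main input: fetch chroma subsampling and bit depth, and turn
 * the percentage thresholds into absolute sums of absolute differences.
 * scthresh scales with the whole frame area, dupthresh with a single
 * blockx x blocky block; e.g. with the default options on an 8-bit input
 * (illustrative values), dupthresh = 255 * 32 * 32 * 1.1 / 100, roughly 2872.
 */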
static int config_input(AVFilterLink *inlink)
{
    int max_value;
    AVFilterContext *ctx = inlink->dst;
    DecimateContext *dm = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    const int w = inlink->w;
    const int h = inlink->h;

    dm->hsub      = pix_desc->log2_chroma_w;
    dm->vsub      = pix_desc->log2_chroma_h;
    dm->depth     = pix_desc->comp[0].depth;
    max_value     = (1 << dm->depth) - 1;
    dm->scthresh  = (int64_t)(((int64_t)max_value *          w *          h * dm->scthresh_flt)  / 100);
    dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
    dm->nxblocks  = (w + dm->blockx/2 - 1) / (dm->blockx/2);
    dm->nyblocks  = (h + dm->blocky/2 - 1) / (dm->blocky/2);
    dm->bdiffsize = dm->nxblocks * dm->nyblocks;
    dm->bdiffs    = av_malloc_array(dm->bdiffsize, sizeof(*dm->bdiffs));
    dm->queue     = av_calloc(dm->cycle, sizeof(*dm->queue));

    if (!dm->bdiffs || !dm->queue)
        return AVERROR(ENOMEM);

    if (dm->ppsrc) {
        dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
        if (!dm->clean_src)
            return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int decimate_init(AVFilterContext *ctx)
{
    DecimateContext *dm = ctx->priv;
    AVFilterPad pad = {
        .name         = av_strdup("main"),
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    };
    int ret;

    if (!pad.name)
        return AVERROR(ENOMEM);
    if ((ret = ff_insert_inpad(ctx, INPUT_MAIN, &pad)) < 0) {
        av_freep(&pad.name);
        return ret;
    }

    if (dm->ppsrc) {
        pad.name = av_strdup("clean_src");
        pad.config_props = NULL;
        if (!pad.name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    if ((dm->blockx & (dm->blockx - 1)) ||
        (dm->blocky & (dm->blocky - 1))) {
        av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
        return AVERROR(EINVAL);
    }

    dm->start_pts = AV_NOPTS_VALUE;

    return 0;
}

static av_cold void decimate_uninit(AVFilterContext *ctx)
{
    int i;
    DecimateContext *dm = ctx->priv;

    av_frame_free(&dm->last);
    av_freep(&dm->bdiffs);
    if (dm->queue) {
        for (i = 0; i < dm->cycle; i++)
            av_frame_free(&dm->queue[i].frame);
    }
    av_freep(&dm->queue);
    if (dm->clean_src) {
        for (i = 0; i < dm->cycle; i++)
            av_frame_free(&dm->clean_src[i]);
    }
    av_freep(&dm->clean_src);
    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

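/*
 * Request a frame on input `lid` if none has been received for the current
 * round.  EOF is not propagated immediately: it is recorded in the dm->eof
 * bitmask and turned into success, so that request_frame() below can keep
 * draining the other input until every active input has been flushed.
 */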
static int request_inlink(AVFilterContext *ctx, int lid)
{
    int ret = 0;
    DecimateContext *dm = ctx->priv;

    if (!dm->got_frame[lid]) {
        AVFilterLink *inlink = ctx->inputs[lid];
        ret = ff_request_frame(inlink);
        if (ret == AVERROR_EOF) { // flushing
            dm->eof |= 1 << lid;
            ret = 0;
        }
    }
    return ret;
}

static int request_frame(AVFilterLink *outlink)
{
    int ret;
    AVFilterContext *ctx = outlink->src;
    DecimateContext *dm = ctx->priv;
    const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;

    if ((dm->eof & eof_mask) == eof_mask) // flush done?
        return AVERROR_EOF;
    if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
        return ret;
    if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
        return ret;
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf,  AV_PIX_FMT_YUV422##suf,  AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf)   AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf)         PF_NOALPHA(suf), PF_ALPHA(suf)
        PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

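/*
 * The output frame rate is the input rate scaled by (cycle - 1) / cycle,
 * since exactly one frame per cycle is dropped.  For example (illustrative),
 * with the default cycle=5, 30000/1001 (29.97 fps) becomes 24000/1001
 * (23.976 fps), the classic NTSC inverse-telecine case.
 */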
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DecimateContext *dm = ctx->priv;
    const AVFilterLink *inlink =
        ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
    outlink->time_base = inlink->time_base;
    outlink->frame_rate = fps;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->w = inlink->w;
    outlink->h = inlink->h;
    dm->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
    return 0;
}

static const AVFilterPad decimate_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_decimate = {
    .name          = "decimate",
    .description   = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
    .init          = decimate_init,
    .uninit        = decimate_uninit,
    .priv_size     = sizeof(DecimateContext),
    .query_formats = query_formats,
    .outputs       = decimate_outputs,
    .priv_class    = &decimate_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
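
/*
 * Illustrative usage (not part of this file; file names are hypothetical),
 * assuming a telecined source and the companion fieldmatch filter:
 *
 *     ffmpeg -i telecined.mkv -vf fieldmatch,decimate -c:a copy out.mkv
 *
 * fieldmatch rebuilds progressive frames (leaving duplicates behind) and
 * decimate then drops one duplicate per cycle of 5 frames by default.
 */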