FFmpeg: vf_thumbnail.c
/*
 * Copyright (c) 2011 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Potential thumbnail lookup filter to reduce the risk of an inappropriate
 * selection (such as a black frame) that we could get with an absolute seek.
 *
 * Simplified version of an algorithm by Vadim Zaliva <lord@crocodile.org>.
 * @see http://notbrainsurgery.livejournal.com/29773.html
 */

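/*
 * Illustrative usage (a sketch, not part of the original file): with the
 * "n" option declared below, the filter is typically inserted in a
 * filtergraph roughly as follows; the exact command line is an assumption
 * based on common ffmpeg usage, and the file names are placeholders.
 *
 *   ffmpeg -i input.mkv -vf thumbnail=n=100 -frames:v 1 thumb.png
 *
 * This buffers batches of 100 frames and, for each batch, outputs the frame
 * whose color histogram is closest to the batch average; -frames:v 1 keeps
 * only the first selected frame.
 */
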
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"

#define HIST_SIZE (3*256)

struct thumb_frame {
    AVFrame *buf;               ///< cached frame
    int histogram[HIST_SIZE];   ///< RGB color distribution histogram of the frame
};

typedef struct ThumbContext {
    const AVClass *class;
    int n;                      ///< current frame
    int loglevel;
    int n_frames;               ///< number of frames for analysis
    struct thumb_frame *frames; ///< the n_frames frames
    AVRational tb;              ///< copy of the input timebase to ease access

    int nb_threads;
    int *thread_histogram;

    int planewidth[4];
    int planeheight[4];
} ThumbContext;

#define OFFSET(x) offsetof(ThumbContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption thumbnail_options[] = {
    { "n", "set the frames batch size", OFFSET(n_frames), AV_OPT_TYPE_INT, {.i64=100}, 2, INT_MAX, FLAGS },
    { "log", "force stats logging level", OFFSET(loglevel), AV_OPT_TYPE_INT, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, FLAGS, .unit = "level" },
        { "quiet",   "logging disabled",          0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_QUIET},   0, 0, FLAGS, .unit = "level" },
        { "info",    "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO},    0, 0, FLAGS, .unit = "level" },
        { "verbose", "verbose logging level",     0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, 0, 0, FLAGS, .unit = "level" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(thumbnail);

static av_cold int init(AVFilterContext *ctx)
{
    ThumbContext *s = ctx->priv;

    s->frames = av_calloc(s->n_frames, sizeof(*s->frames));
    if (!s->frames) {
        av_log(ctx, AV_LOG_ERROR,
               "Allocation failure, try to lower the number of frames\n");
        return AVERROR(ENOMEM);
    }
    av_log(ctx, AV_LOG_VERBOSE, "batch size: %d frames\n", s->n_frames);
    return 0;
}

/**
 * @brief Compute the sum-square deviation to estimate "closeness".
 * @param hist   color distribution histogram
 * @param median average color distribution histogram
 * @return sum of squared errors
 */
static double frame_sum_square_err(const int *hist, const double *median)
{
    int i;
    double err, sum_sq_err = 0;

    for (i = 0; i < HIST_SIZE; i++) {
        err = median[i] - (double)hist[i];
        sum_sq_err += err*err;
    }
    return sum_sq_err;
}

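/**
 * Select the buffered frame whose histogram has the smallest sum of squared
 * errors against the average histogram of the batch, free every other cached
 * frame, and reset the batch state so a new analysis window can start.
 */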
static AVFrame *get_best_frame(AVFilterContext *ctx)
{
    AVFrame *picref;
    ThumbContext *s = ctx->priv;
    int i, j, best_frame_idx = 0;
    int nb_frames = s->n;
    double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;

    // average histogram of the N frames
    for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
        for (i = 0; i < nb_frames; i++)
            avg_hist[j] += (double)s->frames[i].histogram[j];
        avg_hist[j] /= nb_frames;
    }

    // find the frame closest to the average using the sum of squared errors
    for (i = 0; i < nb_frames; i++) {
        sq_err = frame_sum_square_err(s->frames[i].histogram, avg_hist);
        if (i == 0 || sq_err < min_sq_err)
            best_frame_idx = i, min_sq_err = sq_err;
    }

    // free and reset everything (except the best frame buffer)
    for (i = 0; i < nb_frames; i++) {
        memset(s->frames[i].histogram, 0, sizeof(s->frames[i].histogram));
        if (i != best_frame_idx)
            av_frame_free(&s->frames[i].buf);
    }
    s->n = 0;

    // raise the chosen one
    picref = s->frames[best_frame_idx].buf;
    if (s->loglevel != AV_LOG_QUIET)
        av_log(ctx, s->loglevel, "frame id #%d (pts_time=%f) selected "
               "from a set of %d images\n", best_frame_idx,
               picref->pts * av_q2d(s->tb), nb_frames);
    s->frames[best_frame_idx].buf = NULL;

    return picref;
}

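/**
 * Slice-threaded worker: accumulate a 3x256 color histogram over the rows
 * assigned to this job. Packed RGB/BGR layouts are sampled directly; every
 * other supported format is accumulated plane by plane.
 */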
static int do_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThumbContext *s = ctx->priv;
    AVFrame *frame = arg;
    int *hist = s->thread_histogram + HIST_SIZE * jobnr;
    const int h = frame->height;
    const int w = frame->width;
    const int slice_start = (h * jobnr) / nb_jobs;
    const int slice_end = (h * (jobnr+1)) / nb_jobs;
    const uint8_t *p = frame->data[0] + slice_start * frame->linesize[0];

    memset(hist, 0, sizeof(*hist) * HIST_SIZE);

    switch (frame->format) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
        for (int j = slice_start; j < slice_end; j++) {
            for (int i = 0; i < w; i++) {
                hist[0*256 + p[i*3    ]]++;
                hist[1*256 + p[i*3 + 1]]++;
                hist[2*256 + p[i*3 + 2]]++;
            }
            p += frame->linesize[0];
        }
        break;
    case AV_PIX_FMT_RGB0:
    case AV_PIX_FMT_BGR0:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_BGRA:
        for (int j = slice_start; j < slice_end; j++) {
            for (int i = 0; i < w; i++) {
                hist[0*256 + p[i*4    ]]++;
                hist[1*256 + p[i*4 + 1]]++;
                hist[2*256 + p[i*4 + 2]]++;
            }
            p += frame->linesize[0];
        }
        break;
    case AV_PIX_FMT_0RGB:
    case AV_PIX_FMT_0BGR:
    case AV_PIX_FMT_ARGB:
    case AV_PIX_FMT_ABGR:
        for (int j = slice_start; j < slice_end; j++) {
            for (int i = 0; i < w; i++) {
                hist[0*256 + p[i*4 + 1]]++;
                hist[1*256 + p[i*4 + 2]]++;
                hist[2*256 + p[i*4 + 3]]++;
            }
            p += frame->linesize[0];
        }
        break;
    default:
        for (int plane = 0; plane < 3; plane++) {
            const int slice_start = (s->planeheight[plane] * jobnr) / nb_jobs;
            const int slice_end = (s->planeheight[plane] * (jobnr+1)) / nb_jobs;
            const uint8_t *p = frame->data[plane] + slice_start * frame->linesize[plane];
            const ptrdiff_t linesize = frame->linesize[plane];
            const int planewidth = s->planewidth[plane];
            int *hhist = hist + 256 * plane;

            for (int j = slice_start; j < slice_end; j++) {
                for (int i = 0; i < planewidth; i++)
                    hhist[p[i]]++;
                p += linesize;
            }
        }
        break;
    }

    return 0;
}

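/**
 * Cache the incoming frame, compute its histogram with slice threading and
 * merge the per-thread partial histograms. Once n_frames frames have been
 * buffered, push the most representative one downstream.
 */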
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    ThumbContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int *hist = s->frames[s->n].histogram;

    // keep a reference of each frame
    s->frames[s->n].buf = frame;

    ff_filter_execute(ctx, do_slice, frame, NULL,
                      FFMIN(frame->height, s->nb_threads));

    // update current frame histogram
    for (int j = 0; j < FFMIN(frame->height, s->nb_threads); j++) {
        int *thread_histogram = s->thread_histogram + HIST_SIZE * j;

        for (int i = 0; i < HIST_SIZE; i++)
            hist[i] += thread_histogram[i];
    }

    // no selection until the buffer of N frames is filled up
    s->n++;
    if (s->n < s->n_frames)
        return 0;

    return ff_filter_frame(outlink, get_best_frame(ctx));
}

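/**
 * Free any frames still cached in the analysis buffer as well as the
 * per-thread histogram storage.
 */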
static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    ThumbContext *s = ctx->priv;
    for (i = 0; i < s->n_frames && s->frames && s->frames[i].buf; i++)
        av_frame_free(&s->frames[i].buf);
    av_freep(&s->frames);
    av_freep(&s->thread_histogram);
}

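/**
 * Forward the frame request upstream; on EOF, if a partial batch is still
 * buffered, flush it by emitting the best frame collected so far.
 */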
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ThumbContext *s = ctx->priv;
    int ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->n) {
        ret = ff_filter_frame(ctx->outputs[0], get_best_frame(ctx));
        if (ret < 0)
            return ret;
        ret = AVERROR_EOF;
    }
    if (ret < 0)
        return ret;
    return 0;
}

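/**
 * Input configuration: allocate one histogram per worker thread and cache
 * the input time base and per-plane dimensions for later use in do_slice.
 */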
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ThumbContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->thread_histogram = av_calloc(HIST_SIZE, s->nb_threads * sizeof(*s->thread_histogram));
    if (!s->thread_histogram)
        return AVERROR(ENOMEM);

    s->tb = inlink->time_base;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
    AV_PIX_FMT_RGB0, AV_PIX_FMT_BGR0, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA,
    AV_PIX_FMT_0RGB, AV_PIX_FMT_0BGR, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVJ411P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
    AV_PIX_FMT_NONE
};

static const AVFilterPad thumbnail_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad thumbnail_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
};

const AVFilter ff_vf_thumbnail = {
    .name          = "thumbnail",
    .description   = NULL_IF_CONFIG_SMALL("Select the most representative frame in a given sequence of consecutive frames."),
    .priv_size     = sizeof(ThumbContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(thumbnail_inputs),
    FILTER_OUTPUTS(thumbnail_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .priv_class    = &thumbnail_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS |
                     AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};