FFmpeg: vf_spp.c
/*
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Simple post processing filter
 *
 * This implementation is based on an algorithm described in
 * Aria Nosratinia, "Embedded Post-Processing for
 * Enhancement of Compressed Images" (1999).
 *
 * Originally written by Michael Niedermayer for the MPlayer project, and
 * ported by Clément Bœsch for FFmpeg.
 */

#include "libavutil/imgutils.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "qp_table.h"
#include "vf_spp.h"

enum mode {
    MODE_HARD,
    MODE_SOFT,
    NB_MODES
};

static const AVClass *child_class_iterate(void **iter)
{
    const AVClass *c = *iter ? NULL : avcodec_dct_get_class();
    *iter = (void*)(uintptr_t)c;
    return c;
}

static void *child_next(void *obj, void *prev)
{
    SPPContext *s = obj;
    return prev ? NULL : s->dct;
}

#define OFFSET(x) offsetof(SPPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption spp_options[] = {
    { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, TFLAGS },
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" },
        { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
        { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};
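
As a usage note (illustrative, not part of this source file), the options above are what a filtergraph string selects, for example forcing soft thresholding with a constant quantizer:

    ffmpeg -i input.mkv -vf "spp=mode=soft:qp=10" output.mkv

Raising quality increases log2_count and therefore the number of shifted DCT estimates averaged per block in filter() below; use_bframe_qp=1 makes the filter use the QP values stored on B-frames instead of the table saved from the last non-B-frame.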

static const AVClass spp_class = {
    .class_name          = "spp",
    .item_name           = av_default_item_name,
    .option              = spp_options,
    .version             = LIBAVUTIL_VERSION_INT,
    .category            = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
    .child_next          = child_next,
};

// XXX: share between filters?
/* 8x8 ordered dither matrix (values 0..63) added before the final downshift in store_slice */
DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63 },
    { 32, 16, 44, 28, 35, 19, 47, 31 },
    {  8, 56,  4, 52, 11, 59,  7, 55 },
    { 40, 24, 36, 20, 43, 27, 39, 23 },
    {  2, 50, 14, 62,  1, 49, 13, 61 },
    { 34, 18, 46, 30, 33, 17, 45, 29 },
    { 10, 58,  6, 54,  9, 57,  5, 53 },
    { 42, 26, 38, 22, 41, 25, 37, 21 },
};

/* Block shift vectors: for quality level q, filter() averages 1<<q shifted
 * estimates using the entries at indices 1<<q .. (2<<q)-1 of this table. */
static const uint8_t offset[128][2] = {
    {0,0},                                                  // unused
    {0,0},
    {0,0}, {4,4},                                           // quality = 1
    {0,0}, {2,2}, {6,4}, {4,6},                             // quality = 2
    {0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3

    {0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
    {0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

    {0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
    {2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
    {4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
    {6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

    {0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
    {0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
    {1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
    {1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
    {0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
    {0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
    {1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
    {1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};

static void hardthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; // FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3; /* DC coefficient is always kept (rounded >>3) */

    for (i = 1; i < 64; i++) {
        int level = src[i];
        /* keep only AC coefficients with |level| > threshold1 */
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            dst[j] = (level + 4) >> 3;
        }
    }
}

static void softthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; // FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3;

    for (i = 1; i < 64; i++) {
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            /* shrink surviving coefficients toward zero by threshold1 */
            if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
            else           dst[j] = (level + threshold1 + 4) >> 3;
        }
    }
}

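A small worked example of the two requantize callbacks (illustrative numbers, derived from the formulas above). With qp = 2 and bias = 0:

    threshold1 = 2 * ((1<<4) - 0) - 1 = 31     // AC coefficients with |level| <= 31 are zeroed in both modes
    hard thresholding, level = 40:  dst = (40 + 4) >> 3      = 5   // magnitude kept
    soft thresholding, level = 40:  dst = (40 - 31 + 4) >> 3 = 1   // magnitude shrunk toward zero by threshold1

In both modes the DC coefficient is stored unconditionally as (src[0] + 4) >> 3.
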
static void store_slice_c(uint8_t *dst, const int16_t *src,
                          int dst_linesize, int src_linesize,
                          int width, int height, int log2_scale,
                          const uint8_t dither[8][8])
{
    int y, x;

/* scale down the accumulated sum, add the ordered dither and clamp to 0..255 */
#define STORE(pos) do {                                                   \
    temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6; \
    if (temp & 0x100)                                                     \
        temp = ~(temp >> 31);                                             \
    dst[x + y*dst_linesize + pos] = temp;                                 \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE(0);
            STORE(1);
            STORE(2);
            STORE(3);
            STORE(4);
            STORE(5);
            STORE(6);
            STORE(7);
        }
    }
}

static void store_slice16_c(uint16_t *dst, const int16_t *src,
                            int dst_linesize, int src_linesize,
                            int width, int height, int log2_scale,
                            const uint8_t dither[8][8], int depth)
{
    int y, x;
    unsigned int mask = -1<<depth;

/* same as STORE() but for >8-bit samples: half-amplitude dither, >>5 instead
 * of >>6, and a depth-dependent range check */
#define STORE16(pos) do {                                                      \
    temp = ((src[x + y*src_linesize + pos] << log2_scale) + (d[pos]>>1)) >> 5; \
    if (temp & mask)                                                           \
        temp = ~(temp >> 31);                                                  \
    dst[x + y*dst_linesize + pos] = temp;                                      \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE16(0);
            STORE16(1);
            STORE16(2);
            STORE16(3);
            STORE16(4);
            STORE16(5);
            STORE16(6);
            STORE16(7);
        }
    }
}

static inline void add_block(uint16_t *dst, int linesize, const int16_t block[64])
{
    int y;

    for (y = 0; y < 8; y++) {
        dst[0 + y*linesize] += block[0 + y*8];
        dst[1 + y*linesize] += block[1 + y*8];
        dst[2 + y*linesize] += block[2 + y*8];
        dst[3 + y*linesize] += block[3 + y*8];
        dst[4 + y*linesize] += block[4 + y*8];
        dst[5 + y*linesize] += block[5 + y*8];
        dst[6 + y*linesize] += block[6 + y*8];
        dst[7 + y*linesize] += block[7 + y*8];
    }
}

static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
                   int dst_linesize, int src_linesize, int width, int height,
                   const uint8_t *qp_table, int qp_stride, int is_luma, int depth)
{
    int x, y, i;
    const int count = 1 << p->log2_count;
    const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
    DECLARE_ALIGNED(16, uint64_t, block_align)[32];
    int16_t *block  = (int16_t *)block_align;
    int16_t *block2 = (int16_t *)(block_align + 16);
    uint16_t *psrc16 = (uint16_t*)p->src;
    const int sample_bytes = (depth+7) / 8;

    /* copy the plane into the padded buffer, mirroring 8 samples on each side */
    for (y = 0; y < height; y++) {
        int index = 8 + 8*linesize + y*linesize;
        memcpy(p->src + index*sample_bytes, src + y*src_linesize, width*sample_bytes);
        if (sample_bytes == 1) {
            for (x = 0; x < 8; x++) {
                p->src[index         - x - 1] = p->src[index         + x    ];
                p->src[index + width + x    ] = p->src[index + width - x - 1];
            }
        } else {
            for (x = 0; x < 8; x++) {
                psrc16[index         - x - 1] = psrc16[index         + x    ];
                psrc16[index + width + x    ] = psrc16[index + width - x - 1];
            }
        }
    }
    /* mirror 8 rows at the top and bottom */
    for (y = 0; y < 8; y++) {
        memcpy(p->src + (       7-y)*linesize * sample_bytes, p->src + (       y+8)*linesize * sample_bytes, linesize * sample_bytes);
        memcpy(p->src + (height+8+y)*linesize * sample_bytes, p->src + (height-y+7)*linesize * sample_bytes, linesize * sample_bytes);
    }

    for (y = 0; y < height + 8; y += 8) {
        /* clear the accumulator rows the next band of blocks will write into */
        memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
        for (x = 0; x < width + 8; x += 8) {
            int qp;

            if (p->qp) {
                qp = p->qp;
            } else {
                /* per-macroblock QP from the table, normalized to the H.263 scale */
                const int qps = 3 + is_luma;
                qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = FFMAX(1, ff_norm_qscale(qp, p->qscale_type));
            }
            /* accumulate "count" shifted DCT estimates of this block */
            for (i = 0; i < count; i++) {
                const int x1 = x + offset[i + count][0];
                const int y1 = y + offset[i + count][1];
                const int index = x1 + y1*linesize;
                p->dct->get_pixels_unaligned(block, p->src + sample_bytes*index, sample_bytes*linesize);
                p->dct->fdct(block);
                p->requantize(block2, block, qp, p->dct->idct_permutation);
                p->dct->idct(block2);
                add_block(p->temp + index, linesize, block2);
            }
        }
        /* store the 8 rows that are now complete (output rows y-8 .. y-1) */
        if (y) {
            if (sample_bytes == 1) {
                p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
                               dst_linesize, linesize, width,
                               FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                               ldither);
            } else {
                store_slice16_c((uint16_t*)(dst + (y - 8) * dst_linesize), p->temp + 8 + y*linesize,
                                dst_linesize/2, linesize, width,
                                FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                                ldither, depth);
            }
        }
    }
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9,
    AV_PIX_FMT_YUV420P9,
    AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10,
    AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_NONE
};

static int config_input(AVFilterLink *inlink)
{
    SPPContext *s = inlink->dst->priv;
    const int h = FFALIGN(inlink->h + 16, 16);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int bps = desc->comp[0].depth;

    s->store_slice = store_slice_c;
    switch (s->mode) {
    case MODE_HARD: s->requantize = hardthresh_c; break;
    case MODE_SOFT: s->requantize = softthresh_c; break;
    }

    av_opt_set_int(s->dct, "bits_per_sample", bps, 0);
    avcodec_dct_init(s->dct);

#if ARCH_X86
    ff_spp_init_x86(s);
#endif

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->temp_linesize = FFALIGN(inlink->w + 16, 16);
    s->temp = av_malloc_array(s->temp_linesize, h * sizeof(*s->temp));
    s->src  = av_malloc_array(s->temp_linesize, h * sizeof(*s->src) * 2);

    if (!s->temp || !s->src)
        return AVERROR(ENOMEM);
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SPPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;
    int qp_stride = 0;
    int8_t *qp_table = NULL;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    int ret = 0;

    /* if we are not in a constant user quantizer mode and we don't want to use
     * the quantizers from the B-frames (B-frames often have a higher QP), we
     * need to save the qp table from the last non B-frame; this is what the
     * following code block does */
    if (!s->qp && (s->use_bframe_qp || in->pict_type != AV_PICTURE_TYPE_B)) {
        ret = ff_qp_table_extract(in, &qp_table, &qp_stride, NULL, &s->qscale_type);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }

        if (!s->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
            av_freep(&s->non_b_qp_table);
            s->non_b_qp_table  = qp_table;
            s->non_b_qp_stride = qp_stride;
        }
    }

    if (s->log2_count && !ctx->is_disabled) {
        if (!s->use_bframe_qp && s->non_b_qp_table) {
            qp_table  = s->non_b_qp_table;
            qp_stride = s->non_b_qp_stride;
        }

        if (qp_table || s->qp) {
            const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
            const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);

            /* get a new frame if in-place is not possible or if the dimensions
             * are not multiple of 8 */
            if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
                const int aligned_w = FFALIGN(inlink->w, 8);
                const int aligned_h = FFALIGN(inlink->h, 8);

                out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
                if (!out) {
                    av_frame_free(&in);
                    ret = AVERROR(ENOMEM);
                    goto finish;
                }
                av_frame_copy_props(out, in);
                out->width  = in->width;
                out->height = in->height;
            }

            filter(s, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1, depth);

            if (out->data[2]) {
                filter(s, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0, depth);
                filter(s, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0, depth);
            }
            emms_c();
        }
    }

    if (in != out) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    ret = ff_filter_frame(outlink, out);
finish:
    if (qp_table != s->non_b_qp_table)
        av_freep(&qp_table);
    return ret;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    SPPContext *s = ctx->priv;

    if (!strcmp(cmd, "level") || !strcmp(cmd, "quality")) {
        if (!strcmp(args, "max"))
            s->log2_count = MAX_LEVEL;
        else
            s->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
        return 0;
    }
    return AVERROR(ENOSYS);
}

static av_cold int preinit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    s->dct = avcodec_dct_alloc();
    if (!s->dct)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    av_freep(&s->temp);
    av_freep(&s->src);
    av_freep(&s->dct);
    av_freep(&s->non_b_qp_table);
}

static const AVFilterPad spp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad spp_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_spp = {
    .name            = "spp",
    .description     = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
    .priv_size       = sizeof(SPPContext),
    .preinit         = preinit,
    .uninit          = uninit,
    FILTER_INPUTS(spp_inputs),
    FILTER_OUTPUTS(spp_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
    .priv_class      = &spp_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};