FFmpeg: libavfilter/vf_spp.c
/*
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Simple post processing filter
 *
 * This implementation is based on an algorithm described in
 * Aria Nosratinia, "Embedded Post-Processing for Enhancement of
 * Compressed Images" (1999).
 *
 * Originally written by Michael Niedermayer for the MPlayer project, and
 * ported by Clément Bœsch for FFmpeg.
 */
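/*
 * Illustrative usage (a sketch, assuming the standard ffmpeg CLI; the option
 * names are the ones defined in spp_options below):
 *
 *     ffmpeg -i input.mp4 -vf "spp=quality=6:mode=soft" output.mp4
 *
 * quality selects how many shifted DCT grids are averaged (2^quality) and
 * mode selects hard or soft thresholding of the DCT coefficients.
 */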

#include "libavutil/emms.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "qp_table.h"
#include "vf_spp.h"
#include "video.h"

enum mode {
    MODE_HARD,
    MODE_SOFT,
    NB_MODES
};

static const AVClass *child_class_iterate(void **iter)
{
    const AVClass *c = *iter ? NULL : avcodec_dct_get_class();
    *iter = (void*)(uintptr_t)c;
    return c;
}

static void *child_next(void *obj, void *prev)
{
    SPPContext *s = obj;
    return prev ? NULL : s->dct;
}

#define OFFSET(x) offsetof(SPPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption spp_options[] = {
    { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, TFLAGS },
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, .unit = "mode" },
    { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, .unit = "mode" },
    { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, .unit = "mode" },
    { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};
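/*
 * Note: "quality" is the log2 of how many shifted copies of each block are
 * filtered and averaged (2^quality, up to 2^6 = 64 at MAX_LEVEL), so higher
 * values are slower but smoother. qp=0 means "use the per-frame QP table
 * exported by the decoder" rather than a forced constant quantizer.
 */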

static const AVClass spp_class = {
    .class_name          = "spp",
    .item_name           = av_default_item_name,
    .option              = spp_options,
    .version             = LIBAVUTIL_VERSION_INT,
    .category            = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
    .child_next          = child_next,
};

// XXX: share between filters?
DECLARE_ALIGNED(8, static const uint8_t, ldither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63 },
    { 32, 16, 44, 28, 35, 19, 47, 31 },
    {  8, 56,  4, 52, 11, 59,  7, 55 },
    { 40, 24, 36, 20, 43, 27, 39, 23 },
    {  2, 50, 14, 62,  1, 49, 13, 61 },
    { 34, 18, 46, 30, 33, 17, 45, 29 },
    { 10, 58,  6, 54,  9, 57,  5, 53 },
    { 42, 26, 38, 22, 41, 25, 37, 21 },
};
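/*
 * ldither is a standard 8x8 ordered-dither (Bayer-style) matrix with values
 * 0..63; it is added before the final right shift in store_slice*() so the
 * rounding error is spread spatially instead of producing banding.
 */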

static const uint8_t offset[128][2] = {
    {0,0},                                                  // unused
    {0,0},
    {0,0}, {4,4},                                           // quality = 1
    {0,0}, {2,2}, {6,4}, {4,6},                             // quality = 2
    {0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7}, // quality = 3

    {0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3}, // quality = 4
    {0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

    {0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7}, // quality = 5
    {2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
    {4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
    {6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

    {0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2}, // quality = 6
    {0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
    {1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
    {1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
    {0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
    {0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
    {1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
    {1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};

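/*
 * For a given level, filter() reads offset[count + i] for i in [0, count)
 * with count = 1 << log2_count, i.e. entries 2^q .. 2^(q+1)-1 hold the (x, y)
 * shifts of the 8x8 DCT grid that are filtered and averaged at quality q.
 */

/*
 * Hard thresholding: with bias = 0, threshold1 = 16*qp - 1, and a coefficient
 * survives iff |level| > threshold1 (the unsigned compare below folds both
 * signs into a single test). Surviving coefficients are kept unchanged apart
 * from the (x + 4) >> 3 rescaling used with this DCT/IDCT pair; the DC
 * coefficient is always kept.
 */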
static void hardthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; // FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3;

    for (i = 1; i < 64; i++) {
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            dst[j] = (level + 4) >> 3;
        }
    }
}

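/*
 * Soft thresholding: like the hard variant, but surviving coefficients are
 * additionally shrunk towards zero by threshold1 (classic soft shrinkage),
 * trading a little sharpness for less ringing around edges.
 */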
static void softthresh_c(int16_t dst[64], const int16_t src[64],
                         int qp, const uint8_t *permutation)
{
    int i;
    int bias = 0; //FIXME

    unsigned threshold1 = qp * ((1<<4) - bias) - 1;
    unsigned threshold2 = threshold1 << 1;

    memset(dst, 0, 64 * sizeof(dst[0]));
    dst[0] = (src[0] + 4) >> 3;

    for (i = 1; i < 64; i++) {
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            const int j = permutation[i];
            if (level > 0) dst[j] = (level - threshold1 + 4) >> 3;
            else           dst[j] = (level + threshold1 + 4) >> 3;
        }
    }
}

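/*
 * Writes one horizontal slice of the 16-bit accumulation buffer back to 8-bit
 * pixels. Each value is scaled up by 1 << log2_scale (to normalize for fewer
 * than 64 averaged shifts), dithered with the matrix above and shifted down
 * by 6. The "temp & 0x100" test in STORE() catches both overflow and
 * underflow: ~(temp >> 31) evaluates to 255 for positive out-of-range values
 * and to 0 for negative ones.
 */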
static void store_slice_c(uint8_t *dst, const int16_t *src,
                          int dst_linesize, int src_linesize,
                          int width, int height, int log2_scale,
                          const uint8_t dither[8][8])
{
    int y, x;

#define STORE(pos) do {                                                       \
    temp = (src[x + y*src_linesize + pos] * (1 << log2_scale) + d[pos]) >> 6; \
    if (temp & 0x100)                                                         \
        temp = ~(temp >> 31);                                                 \
    dst[x + y*dst_linesize + pos] = temp;                                     \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE(0);
            STORE(1);
            STORE(2);
            STORE(3);
            STORE(4);
            STORE(5);
            STORE(6);
            STORE(7);
        }
    }
}

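/*
 * Same as store_slice_c() but for samples deeper than 8 bits: the dither
 * value is halved, the final shift is 5 instead of 6, and the clip test uses
 * a mask built from the actual bit depth.
 */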
static void store_slice16_c(uint16_t *dst, const int16_t *src,
                            int dst_linesize, int src_linesize,
                            int width, int height, int log2_scale,
                            const uint8_t dither[8][8], int depth)
{
    int y, x;
    unsigned int mask = -1<<depth;

#define STORE16(pos) do {                                                                \
    temp = (src[x + y*src_linesize + pos] * (1 << log2_scale) + (d[pos] >> 1)) >> 5;     \
    if (temp & mask)                                                                     \
        temp = ~(temp >> 31);                                                            \
    dst[x + y*dst_linesize + pos] = temp;                                                \
} while (0)

    for (y = 0; y < height; y++) {
        const uint8_t *d = dither[y];
        for (x = 0; x < width; x += 8) {
            int temp;
            STORE16(0);
            STORE16(1);
            STORE16(2);
            STORE16(3);
            STORE16(4);
            STORE16(5);
            STORE16(6);
            STORE16(7);
        }
    }
}

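/*
 * Accumulates one filtered (IDCT'd) 8x8 block into the 16-bit temp buffer;
 * the shifted contributions from all grid offsets are summed here and
 * rescaled later in store_slice*().
 */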
static inline void add_block(uint16_t *dst, int linesize, const int16_t block[64])
{
    int y;

    for (y = 0; y < 8; y++) {
        dst[0 + y*linesize] += block[0 + y*8];
        dst[1 + y*linesize] += block[1 + y*8];
        dst[2 + y*linesize] += block[2 + y*8];
        dst[3 + y*linesize] += block[3 + y*8];
        dst[4 + y*linesize] += block[4 + y*8];
        dst[5 + y*linesize] += block[5 + y*8];
        dst[6 + y*linesize] += block[6 + y*8];
        dst[7 + y*linesize] += block[7 + y*8];
    }
}

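/*
 * Core of the filter for one plane: the plane is copied into p->src with an
 * 8-pixel mirrored border on every side, then for each 8x8 position the block
 * is transformed, requantized (thresholded) and inverse-transformed once per
 * grid offset, and the results are accumulated in p->temp. Rows are written
 * back 8 lines behind the accumulation so every pixel has received all of its
 * contributions. The per-block QP comes either from the forced "qp" option or
 * from the decoder's QP table (one value per 16x16 macroblock for luma, per
 * 8x8 in chroma coordinates).
 */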
static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
                   int dst_linesize, int src_linesize, int width, int height,
                   const uint8_t *qp_table, int qp_stride, int is_luma, int depth)
{
    int x, y, i;
    const int count = 1 << p->log2_count;
    const int linesize = is_luma ? p->temp_linesize : FFALIGN(width+16, 16);
    DECLARE_ALIGNED(16, uint64_t, block_align)[32];
    int16_t *block  = (int16_t *)block_align;
    int16_t *block2 = (int16_t *)(block_align + 16);
    uint16_t *psrc16 = (uint16_t*)p->src;
    const int sample_bytes = (depth+7) / 8;

    for (y = 0; y < height; y++) {
        int index = 8 + 8*linesize + y*linesize;
        memcpy(p->src + index*sample_bytes, src + y*src_linesize, width*sample_bytes);
        if (sample_bytes == 1) {
            for (x = 0; x < 8; x++) {
                p->src[index         - x - 1] = p->src[index +         x    ];
                p->src[index + width + x    ] = p->src[index + width - x - 1];
            }
        } else {
            for (x = 0; x < 8; x++) {
                psrc16[index         - x - 1] = psrc16[index +         x    ];
                psrc16[index + width + x    ] = psrc16[index + width - x - 1];
            }
        }
    }
    for (y = 0; y < 8; y++) {
        memcpy(p->src + (       7-y)*linesize * sample_bytes, p->src + (       y+8)*linesize * sample_bytes, linesize * sample_bytes);
        memcpy(p->src + (height+8+y)*linesize * sample_bytes, p->src + (height-y+7)*linesize * sample_bytes, linesize * sample_bytes);
    }

    for (y = 0; y < height + 8; y += 8) {
        memset(p->temp + (8 + y) * linesize, 0, 8 * linesize * sizeof(*p->temp));
        for (x = 0; x < width + 8; x += 8) {
            int qp;

            if (p->qp) {
                qp = p->qp;
            } else {
                const int qps = 3 + is_luma;
                qp = qp_table[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = FFMAX(1, ff_norm_qscale(qp, p->qscale_type));
            }
            for (i = 0; i < count; i++) {
                const int x1 = x + offset[i + count][0];
                const int y1 = y + offset[i + count][1];
                const int index = x1 + y1*linesize;
                p->dct->get_pixels_unaligned(block, p->src + sample_bytes*index, sample_bytes*linesize);
                p->dct->fdct(block);
                p->requantize(block2, block, qp, p->dct->idct_permutation);
                p->dct->idct(block2);
                add_block(p->temp + index, linesize, block2);
            }
        }
        if (y) {
            if (sample_bytes == 1) {
                p->store_slice(dst + (y - 8) * dst_linesize, p->temp + 8 + y*linesize,
                               dst_linesize, linesize, width,
                               FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                               ldither);
            } else {
                store_slice16_c((uint16_t*)(dst + (y - 8) * dst_linesize), p->temp + 8 + y*linesize,
                                dst_linesize/2, linesize, width,
                                FFMIN(8, height + 8 - y), MAX_LEVEL - p->log2_count,
                                ldither, depth);
            }
        }
    }
}

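/*
 * The format list below covers 8-, 9- and 10-bit planar YUV/GBR plus 8-bit
 * gray; anything deeper than 8 bits per sample takes the store_slice16_c()
 * path in filter() above.
 */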
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
    AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GRAY8,
    AV_PIX_FMT_NONE
};

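/*
 * Per-link setup: picks the C (or, on x86, SIMD) implementations, configures
 * the shared AVDCT context for the input bit depth, and allocates the padded
 * 16-bit accumulation buffer (temp) and the padded source copy (src, two
 * bytes per sample to cover the high bit depth formats).
 */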
static int config_input(AVFilterLink *inlink)
{
    SPPContext *s = inlink->dst->priv;
    const int h = FFALIGN(inlink->h + 16, 16);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int bps = desc->comp[0].depth;

    s->store_slice = store_slice_c;
    switch (s->mode) {
    case MODE_HARD: s->requantize = hardthresh_c; break;
    case MODE_SOFT: s->requantize = softthresh_c; break;
    }

    av_opt_set_int(s->dct, "bits_per_sample", bps, 0);
    avcodec_dct_init(s->dct);

#if ARCH_X86
    ff_spp_init_x86(s);
#endif

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->temp_linesize = FFALIGN(inlink->w + 16, 16);
    s->temp = av_malloc_array(s->temp_linesize, h * sizeof(*s->temp));
    s->src  = av_malloc_array(s->temp_linesize, h * sizeof(*s->src) * 2);

    if (!s->temp || !s->src)
        return AVERROR(ENOMEM);
    return 0;
}

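/*
 * Per-frame entry point: fetches (or reuses) a QP table, grabs a writable
 * output frame when in-place filtering is not possible, runs filter() on the
 * luma and chroma planes, and passes any extra plane (alpha) through
 * untouched.
 */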
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SPPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;
    int qp_stride = 0;
    int8_t *qp_table = NULL;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    int ret = 0;

    /* if we are not in a constant user quantizer mode and we don't want to use
     * the quantizers from the B-frames (B-frames often have a higher QP), we
     * need to save the qp table from the last non B-frame; this is what the
     * following code block does */
    if (!s->qp && (s->use_bframe_qp || in->pict_type != AV_PICTURE_TYPE_B)) {
        ret = ff_qp_table_extract(in, &qp_table, &qp_stride, NULL, &s->qscale_type);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }

        if (!s->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
            av_freep(&s->non_b_qp_table);
            s->non_b_qp_table  = qp_table;
            s->non_b_qp_stride = qp_stride;
        }
    }

    if (s->log2_count && !ctx->is_disabled) {
        if (!s->use_bframe_qp && s->non_b_qp_table) {
            qp_table  = s->non_b_qp_table;
            qp_stride = s->non_b_qp_stride;
        }

        if (qp_table || s->qp) {
            const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
            const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);

            /* get a new frame if in-place is not possible or if the dimensions
             * are not multiple of 8 */
            if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
                const int aligned_w = FFALIGN(inlink->w, 8);
                const int aligned_h = FFALIGN(inlink->h, 8);

                out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
                if (!out) {
                    av_frame_free(&in);
                    ret = AVERROR(ENOMEM);
                    goto finish;
                }
                av_frame_copy_props(out, in);
                out->width  = in->width;
                out->height = in->height;
            }

            filter(s, out->data[0], in->data[0], out->linesize[0], in->linesize[0], inlink->w, inlink->h, qp_table, qp_stride, 1, depth);

            if (out->data[2]) {
                filter(s, out->data[1], in->data[1], out->linesize[1], in->linesize[1], cw, ch, qp_table, qp_stride, 0, depth);
                filter(s, out->data[2], in->data[2], out->linesize[2], in->linesize[2], cw, ch, qp_table, qp_stride, 0, depth);
            }
            emms_c();
        }
    }

    if (in != out) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    ret = ff_filter_frame(outlink, out);
finish:
    if (qp_table != s->non_b_qp_table)
        av_freep(&qp_table);
    return ret;
}

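/*
 * Runtime control: "level"/"quality" can be changed while the graph runs.
 * An illustrative sketch using the sendcmd filter (the 10.0 timestamp and
 * file names are made up):
 *
 *     ffmpeg -i input.mp4 -vf "sendcmd=c='10.0 spp quality 5',spp" output.mp4
 *
 * "max" is accepted as a shortcut for MAX_LEVEL.
 */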
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    SPPContext *s = ctx->priv;

    if (!strcmp(cmd, "level") || !strcmp(cmd, "quality")) {
        if (!strcmp(args, "max"))
            s->log2_count = MAX_LEVEL;
        else
            s->log2_count = av_clip(strtol(args, NULL, 10), 0, MAX_LEVEL);
        return 0;
    }
    return AVERROR(ENOSYS);
}

static av_cold int preinit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    s->dct = avcodec_dct_alloc();
    if (!s->dct)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SPPContext *s = ctx->priv;

    av_freep(&s->temp);
    av_freep(&s->src);
    av_freep(&s->dct);
    av_freep(&s->non_b_qp_table);
}

static const AVFilterPad spp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

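/*
 * The filter declares AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL below, so
 * filter_frame() keeps being called while the filter is disabled and checks
 * ctx->is_disabled itself; this lets the non-B-frame QP table stay up to date
 * across enable/disable transitions.
 */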
const AVFilter ff_vf_spp = {
    .name            = "spp",
    .description     = NULL_IF_CONFIG_SMALL("Apply a simple post processing filter."),
    .priv_size       = sizeof(SPPContext),
    .preinit         = preinit,
    .uninit          = uninit,
    FILTER_INPUTS(spp_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .process_command = process_command,
    .priv_class      = &spp_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};