FFmpeg
vp8dsp_init.c
Go to the documentation of this file.
1 /*
2  * VP8 DSP functions x86-optimized
3  * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
4  * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/attributes.h"
24 #include "libavutil/cpu.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/mem_internal.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavcodec/vp8dsp.h"
29 
30 #if HAVE_X86ASM
31 
32 /*
33  * MC functions
34  */
35 void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
36  uint8_t *src, ptrdiff_t srcstride,
37  int height, int mx, int my);
38 void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
39  uint8_t *src, ptrdiff_t srcstride,
40  int height, int mx, int my);
41 void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
42  uint8_t *src, ptrdiff_t srcstride,
43  int height, int mx, int my);
44 void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
45  uint8_t *src, ptrdiff_t srcstride,
46  int height, int mx, int my);
47 
48 void ff_put_vp8_epel8_h4_sse2 (uint8_t *dst, ptrdiff_t dststride,
49  uint8_t *src, ptrdiff_t srcstride,
50  int height, int mx, int my);
51 void ff_put_vp8_epel8_h6_sse2 (uint8_t *dst, ptrdiff_t dststride,
52  uint8_t *src, ptrdiff_t srcstride,
53  int height, int mx, int my);
54 void ff_put_vp8_epel8_v4_sse2 (uint8_t *dst, ptrdiff_t dststride,
55  uint8_t *src, ptrdiff_t srcstride,
56  int height, int mx, int my);
57 void ff_put_vp8_epel8_v6_sse2 (uint8_t *dst, ptrdiff_t dststride,
58  uint8_t *src, ptrdiff_t srcstride,
59  int height, int mx, int my);
60 
61 void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
62  uint8_t *src, ptrdiff_t srcstride,
63  int height, int mx, int my);
64 void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
65  uint8_t *src, ptrdiff_t srcstride,
66  int height, int mx, int my);
67 void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
68  uint8_t *src, ptrdiff_t srcstride,
69  int height, int mx, int my);
70 void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
71  uint8_t *src, ptrdiff_t srcstride,
72  int height, int mx, int my);
73 void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
74  uint8_t *src, ptrdiff_t srcstride,
75  int height, int mx, int my);
76 void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
77  uint8_t *src, ptrdiff_t srcstride,
78  int height, int mx, int my);
79 void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
80  uint8_t *src, ptrdiff_t srcstride,
81  int height, int mx, int my);
82 void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
83  uint8_t *src, ptrdiff_t srcstride,
84  int height, int mx, int my);
85 
86 void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
87  uint8_t *src, ptrdiff_t srcstride,
88  int height, int mx, int my);
89 void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, ptrdiff_t dststride,
90  uint8_t *src, ptrdiff_t srcstride,
91  int height, int mx, int my);
92 void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
93  uint8_t *src, ptrdiff_t srcstride,
94  int height, int mx, int my);
95 void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
96  uint8_t *src, ptrdiff_t srcstride,
97  int height, int mx, int my);
98 
99 void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
100  uint8_t *src, ptrdiff_t srcstride,
101  int height, int mx, int my);
102 void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, ptrdiff_t dststride,
103  uint8_t *src, ptrdiff_t srcstride,
104  int height, int mx, int my);
105 void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
106  uint8_t *src, ptrdiff_t srcstride,
107  int height, int mx, int my);
108 void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
109  uint8_t *src, ptrdiff_t srcstride,
110  int height, int mx, int my);
111 
112 
113 void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
114  uint8_t *src, ptrdiff_t srcstride,
115  int height, int mx, int my);
116 void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
117  uint8_t *src, ptrdiff_t srcstride,
118  int height, int mx, int my);
119 void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
120  uint8_t *src, ptrdiff_t srcstride,
121  int height, int mx, int my);
122 
/* Build a width=16 put function by applying the corresponding width=8
 * function to the left and right 8-pixel halves of the block. */
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst, dststride, src, srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst + 8, dststride, src + 8, srcstride, height, mx, my); \
}
/* Build a width=8 put function by applying the corresponding width=4
 * function to the left and right 4-pixel halves of the block. */
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst, dststride, src, srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
143 
/* Instantiate the width-doubling wrappers.  The MMXEXT-based w=8/w=16
 * wrappers are only built on x86-32; presumably 64-bit builds always have
 * the wider SSE2/SSSE3 primitives available -- confirm against the asm. */
#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
TAP_W8 (mmxext, epel, v4)
TAP_W8 (mmxext, epel, v6)
TAP_W16(mmxext, epel, v6)
TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
#endif

/* w=16 six-tap and bilinear wrappers built from the w=8 SIMD primitives. */
TAP_W16(sse2,  epel, h6)
TAP_W16(sse2,  epel, v6)
TAP_W16(sse2,  bilinear, h)
TAP_W16(sse2,  bilinear, v)

TAP_W16(ssse3, epel, h6)
TAP_W16(ssse3, epel, v6)
TAP_W16(ssse3, bilinear, h)
TAP_W16(ssse3, bilinear, v)
166 
/* Build a combined horizontal+vertical epel filter of width SIZE:
 * run the TAPNUMX-tap horizontal filter into an ALIGN-byte-aligned
 * temporary buffer with TAPNUMY - 1 extra rows (the vertical filter
 * needs context rows above and below), then run the TAPNUMY-tap
 * vertical filter from the temporary into dst.  src is rewound by
 * TAPNUMY/2 - 1 rows so the temporary covers the rows the vertical
 * pass will read before the first output row. */
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
    src -= srcstride * (TAPNUMY / 2 - 1); \
    ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
        tmp, SIZE, src, srcstride, height + TAPNUMY - 1, mx, my); \
    ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
        dst, dststride, tmpptr, SIZE, height, mx, my); \
}
180 
/* Instantiate the h+v tap combinations.  On x86-32 each HVTAPMMX()
 * produces both the 4-wide and 8-wide MMXEXT variant (plus a 16-wide
 * 6x6 one below); on other arches only the 4-wide variant is needed. */
#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)

HVTAP(mmxext, 8, 6, 6, 16, 16)
#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)
#endif

HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)

/* SSE2 and SSSE3 variants share the same tap combinations and use a
 * 16-byte-aligned temporary buffer. */
#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
HVTAP(ssse3, 16, x, y, w, 16)

HVTAPSSE2(4, 4, 8)
HVTAPSSE2(4, 6, 8)
HVTAPSSE2(6, 4, 8)
HVTAPSSE2(6, 6, 8)
HVTAPSSE2(6, 6, 16)

/* 4-pixel-wide h+v combinations exist only for SSSE3. */
HVTAP(ssse3, 16, 4, 4, 4, 8)
HVTAP(ssse3, 16, 4, 6, 4, 8)
HVTAP(ssse3, 16, 6, 4, 4, 8)
HVTAP(ssse3, 16, 6, 6, 4, 8)
211 
/* Build a combined horizontal+vertical bilinear filter of width SIZE:
 * filter horizontally into an ALIGN-byte-aligned temporary (height + 1
 * rows, since the vertical pass reads one row past each output row),
 * then filter vertically from the temporary into dst. */
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
    ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
        tmp, SIZE, src, srcstride, height + 1, mx, my); \
    ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
        dst, dststride, tmp, SIZE, height, mx, my); \
}

HVBILIN(mmxext, 8,  4,  8)
#if ARCH_X86_32
HVBILIN(mmxext, 8,  8, 16)
HVBILIN(mmxext, 8, 16, 16)
#endif
HVBILIN(sse2,  8,  8, 16)
HVBILIN(sse2,  8, 16, 16)
HVBILIN(ssse3, 8,  4,  8)
HVBILIN(ssse3, 8,  8, 16)
HVBILIN(ssse3, 8, 16, 16)
234 
235 void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
236  ptrdiff_t stride);
237 void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
238  ptrdiff_t stride);
239 void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
240  ptrdiff_t stride);
241 void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
242  ptrdiff_t stride);
243 void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
244  ptrdiff_t stride);
245 void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
246  ptrdiff_t stride);
247 void ff_vp8_luma_dc_wht_mmx(int16_t block[4][4][16], int16_t dc[16]);
248 void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
249 void ff_vp8_idct_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
250 void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
251 
/* Declare the full set of loop-filter asm entry points for one
 * instruction-set suffix NAME.  flim, e, i and hvt are integer filter
 * limits/thresholds (presumably the VP8 simple-filter limit, edge limit,
 * interior limit and high-edge-variance threshold -- confirm against
 * vp8dsp.h and the asm). */
#define DECLARE_LOOP_FILTER(NAME) \
void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt);

/* Declarations for every instruction-set variant provided by the asm. */
DECLARE_LOOP_FILTER(mmx)
DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)
293 
294 #endif /* HAVE_X86ASM */
295 
/* Fill the 6-tap (luma) entries of the epel MC table.  The table appears
 * to be indexed as put_vp8_epel_pixels_tab[IDX][v-filter][h-filter] with
 * 0 = copy, 1 = 4-tap, 2 = 6-tap -- confirm against vp8dsp.h. */
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT

/* Fill the remaining (4-tap and mixed-tap) epel entries, then the 6-tap
 * ones via VP8_LUMA_MC_FUNC. */
#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)

/* Fill the bilinear MC table; the same h/v/hv functions are reused for
 * both table rows/columns 1 and 2 (bilinear has no tap-count variants). */
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
318 
319 
321 {
322 #if HAVE_X86ASM
323  int cpu_flags = av_get_cpu_flags();
324 
325  if (EXTERNAL_MMX(cpu_flags)) {
326 #if ARCH_X86_32
327  c->put_vp8_epel_pixels_tab[0][0][0] =
328  c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
329 #endif
330  c->put_vp8_epel_pixels_tab[1][0][0] =
331  c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
332  }
333 
334  /* note that 4-tap width=16 functions are missing because w=16
335  * is only used for luma, and luma is always a copy or sixtap. */
336  if (EXTERNAL_MMXEXT(cpu_flags)) {
337  VP8_MC_FUNC(2, 4, mmxext);
338  VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
339 #if ARCH_X86_32
340  VP8_LUMA_MC_FUNC(0, 16, mmxext);
341  VP8_MC_FUNC(1, 8, mmxext);
342  VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
343  VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
344 #endif
345  }
346 
347  if (EXTERNAL_SSE(cpu_flags)) {
348  c->put_vp8_epel_pixels_tab[0][0][0] =
349  c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
350  }
351 
353  VP8_LUMA_MC_FUNC(0, 16, sse2);
354  VP8_MC_FUNC(1, 8, sse2);
355  VP8_BILINEAR_MC_FUNC(0, 16, sse2);
356  VP8_BILINEAR_MC_FUNC(1, 8, sse2);
357  }
358 
359  if (EXTERNAL_SSSE3(cpu_flags)) {
360  VP8_LUMA_MC_FUNC(0, 16, ssse3);
361  VP8_MC_FUNC(1, 8, ssse3);
362  VP8_MC_FUNC(2, 4, ssse3);
363  VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
364  VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
365  VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
366  }
367 #endif /* HAVE_X86ASM */
368 }
369 
371 {
372 #if HAVE_X86ASM
373  int cpu_flags = av_get_cpu_flags();
374 
375  if (EXTERNAL_MMX(cpu_flags)) {
376  c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
377 #if ARCH_X86_32
378  c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
379  c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
380  c->vp8_idct_add = ff_vp8_idct_add_mmx;
381  c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
382 
383  c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
384  c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
385 
386  c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
387  c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
388  c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
389  c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;
390 
391  c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
392  c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
393  c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
394  c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
395 #endif
396  }
397 
398  /* note that 4-tap width=16 functions are missing because w=16
399  * is only used for luma, and luma is always a copy or sixtap. */
400  if (EXTERNAL_MMXEXT(cpu_flags)) {
401 #if ARCH_X86_32
402  c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
403  c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
404 
405  c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
406  c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
407  c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
408  c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;
409 
410  c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
411  c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
412  c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
413  c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
414 #endif
415  }
416 
417  if (EXTERNAL_SSE(cpu_flags)) {
418  c->vp8_idct_add = ff_vp8_idct_add_sse;
419  c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
420  }
421 
423  c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
424 
425  c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
426  c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
427 
428  c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
429  c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
430  }
431 
432  if (EXTERNAL_SSE2(cpu_flags)) {
433  c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse2;
434  c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
435 
436  c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
437 
438  c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
439  c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
440 
441  c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
442  c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
443  }
444 
445  if (EXTERNAL_SSSE3(cpu_flags)) {
446  c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
447  c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;
448 
449  c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
450  c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
451  c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
452  c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;
453 
454  c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
455  c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
456  c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
457  c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
458  }
459 
460  if (EXTERNAL_SSE4(cpu_flags)) {
461  c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
462 
463  c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
464  c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
465  c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
466  }
467 #endif /* HAVE_X86ASM */
468 }
stride
int stride
Definition: mace.c:144
cpu.h
mem_internal.h
av_get_cpu_flags
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
Definition: cpu.c:95
VP8_BILINEAR_MC_FUNC
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT)
Definition: vp8dsp_init.c:309
cpu_flags
static atomic_int cpu_flags
Definition: cpu.c:50
ff_vp8dsp_init_x86
av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
Definition: vp8dsp_init.c:370
av_cold
#define av_cold
Definition: attributes.h:90
vp8dsp.h
EXTERNAL_SSE
#define EXTERNAL_SSE(flags)
Definition: cpu.h:58
src
#define src
Definition: vp8dsp.c:255
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VP8DSPContext
Definition: vp8dsp.h:37
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
cpu.h
height
#define height
attributes.h
EXTERNAL_SSE2
#define EXTERNAL_SSE2(flags)
Definition: cpu.h:59
VP8_MC_FUNC
#define VP8_MC_FUNC(IDX, SIZE, OPT)
Definition: vp8dsp_init.c:301
EXTERNAL_SSE4
#define EXTERNAL_SSE4(flags)
Definition: cpu.h:68
mem.h
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
h
h
Definition: vp9dsp_template.c:2038
EXTERNAL_SSSE3
#define EXTERNAL_SSSE3(flags)
Definition: cpu.h:65
EXTERNAL_MMX
#define EXTERNAL_MMX(flags)
Definition: cpu.h:56
EXTERNAL_SSE2_SLOW
#define EXTERNAL_SSE2_SLOW(flags)
Definition: cpu.h:61
EXTERNAL_MMXEXT
#define EXTERNAL_MMXEXT(flags)
Definition: cpu.h:57
ff_vp78dsp_init_x86
av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
Definition: vp8dsp_init.c:320
VP8_LUMA_MC_FUNC
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)
Definition: vp8dsp_init.c:296