vp8dsp_altivec.c
/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vp8dsp.h"
#include "hpeldsp_altivec.h"

#if HAVE_ALTIVEC
#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }

// h subpel filter uses msum to multiply+add 4 pixel taps at once
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};
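
// Each row holds the middle four taps of one of VP8's seven subpel filters
// (indexed by mx-1); the odd rows are the true 6-tap filters, whose outer
// two taps come from h_subpel_filters_outer below, while the even rows are
// effectively 4-tap (their outer taps are zero).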

// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};

#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)
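
// filter_outerh lines the outer taps up with pixels 0-3; rotating it by two
// bytes with vec_sld yields filter_outerl, which lines the same taps up with
// pixels 4-7 (the zero entries mask off whichever half is not being filtered).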

#if HAVE_BIGENDIAN
#define GET_PIXHL(offset)                  \
    a = vec_ld((offset)-is6tap-1, src);    \
    b = vec_ld((offset)-is6tap-1+15, src); \
    pixh = vec_perm(a, b, permh##offset);  \
    pixl = vec_perm(a, b, perml##offset)

#define GET_OUTER(offset) outer = vec_perm(a, b, perm_6tap##offset)
#else
#define GET_PIXHL(offset)                   \
    a = vec_vsx_ld((offset)-is6tap-1, src); \
    pixh = vec_perm(a, a, perm_inner);      \
    pixl = vec_perm(a, a, vec_add(perm_inner, vec_splat_u8(4)))

#define GET_OUTER(offset) outer = vec_perm(a, a, perm_outer)
#endif
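
// On big-endian the unaligned loads go through the classic vec_ld + vec_perm
// sequence, reusing permute vectors computed once per call; on little-endian
// VSX, vec_vsx_ld handles misalignment directly and only the tap-gathering
// permutes remain.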

#define FILTER_H(dstv, off) \
    GET_PIXHL(off);                            \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        GET_OUTER(off);                                \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv = vec_packs(filth, filtl); \
    dstv = vec_sra(dstv, c7)
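
// The filter taps sum to 128, so seeding the multiply-accumulate with
// c64 = 64 and shifting right by c7 = 7 computes (sum + 64) >> 7, with
// vec_packs providing signed saturation on the way down to 16 bits.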

static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
#if HAVE_BIGENDIAN
    vec_u8 align_vec0, align_vec8, permh0, permh8;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 b;
#endif
    vec_u8 filt, a, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer  = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

#if HAVE_BIGENDIAN
    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);
#endif

    while (h --> 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}
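
// Store strategy: w == 16 writes one full aligned vector per row with
// vec_st; the narrower widths pack the result into both halves of the
// vector and store it as 32-bit elements with vec_ste.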

// v subpel filter does a simple vertical multiply + add
static const vec_u8 v_subpel_filters[7] =
{
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};
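
// Unlike the horizontal filters, these taps are stored as unsigned
// magnitudes; FILTER_V reapplies the signs (+,-,+,+,-,+) with saturating
// adds and subtracts, letting the 8-bit multiplies stay unsigned.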

#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)

#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)
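
// FILTER_V is instantiated twice per row: vec_mule multiplies the even byte
// lanes and vec_mulo the odd ones, each giving eight 16-bit products, which
// is why LOAD_HL below interleaves pixels 0-7 into the even lanes and 8-15
// into the odd ones.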

#if HAVE_BIGENDIAN
#define LOAD_HL(off, s, perm) load_with_perm_vec(off, s, perm)
#else
#define LOAD_HL(off, s, perm) vec_mergeh(vec_vsx_ld(off,s), vec_vsx_ld(off+8,s))
#endif

static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
                                 uint8_t *src, ptrdiff_t src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

#if HAVE_BIGENDIAN
    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);
#endif

    if (is6tap)
        s0 = LOAD_HL(-2*src_stride, src, perm_vec);
    s1 = LOAD_HL(-1*src_stride, src, perm_vec);
    s2 = LOAD_HL( 0*src_stride, src, perm_vec);
    s3 = LOAD_HL( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = LOAD_HL( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;

    while (h --> 0) {
        if (is6tap)
            s5 = LOAD_HL(0, src, perm_vec);
        else
            s4 = LOAD_HL(0, src, perm_vec);

        FILTER_V(f16h, vec_mule);

        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}

#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
    if (VTAPS == 6) { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-2*sstride, sstride, h+5, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+2*16,      16,      h,   mx, my); \
    } else { \
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,      src-sstride,   sstride, h+4, mx, my); \
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+16,        16,      h,   mx, my); \
    } \
}
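
// The combined H+V functions filter horizontally into an aligned temporary
// buffer first, starting far enough above the block (two rows for a 6-tap
// vertical filter, one row for 4-tap) that the vertical pass has all the
// taps it needs, then filter the temporary buffer vertically into dst.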

EPEL_FUNCS(16,6)
EPEL_FUNCS(8, 6)
EPEL_FUNCS(8, 4)
EPEL_FUNCS(4, 6)
EPEL_FUNCS(4, 4)

EPEL_HV(16, 6,6)
EPEL_HV(8,  6,6)
EPEL_HV(8,  4,6)
EPEL_HV(8,  6,4)
EPEL_HV(8,  4,4)
EPEL_HV(4,  6,6)
EPEL_HV(4,  4,6)
EPEL_HV(4,  6,4)
EPEL_HV(4,  4,4)

static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
    register vector unsigned char perm;
    int i;
    register ptrdiff_t dstride2 = dstride << 1,       sstride2 = sstride << 1;
    register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
    register ptrdiff_t dstride4 = dstride << 2,       sstride4 = sstride << 2;

#if HAVE_BIGENDIAN
    perm = vec_lvsl(0, src);
#endif
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        vec_st(load_with_perm_vec(0,        src, perm), 0,        dst);
        vec_st(load_with_perm_vec(sstride,  src, perm), dstride,  dst);
        vec_st(load_with_perm_vec(sstride2, src, perm), dstride2, dst);
        vec_st(load_with_perm_vec(sstride3, src, perm), dstride3, dst);
        src += sstride4;
        dst += dstride4;
    }
}

#endif /* HAVE_ALTIVEC */

av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;
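
    /* put_vp8_epel_pixels_tab is indexed [size][vtaps][htaps]: the first
     * index selects 16-, 8- or 4-pixel-wide blocks, and the second/third
     * select the vertical/horizontal filter: 0 = no interpolation,
     * 1 = 4-tap, 2 = 6-tap (see the table's documentation in vp8dsp.h). */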
    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
#endif /* HAVE_ALTIVEC */
}
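
/* Usage sketch (hypothetical, not part of this file): once the decoder has
 * run ff_vp78dsp_init_ppc(c), a 16-pixel-wide block interpolated with the
 * 6-tap filter in both directions would be produced by something like:
 *
 *     c->put_vp8_epel_pixels_tab[0][2][2](dst, stride, src, stride, 16, mx, my);
 */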