    /* first stage: butterflies on the odd inputs (vx1/vx7 and vx3/vx5) */ \
    t1 = vec_mradds(a1, vx7, vx1); \
    t8 = vec_mradds(a1, vx1, vec_subs(zero, vx7)); \
    t7 = vec_mradds(a2, vx5, vx3); \
    t3 = vec_mradds(ma2, vx3, vx5); \
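    /* second stage: even inputs (vx0/vx4, vx2/vx6) plus sums/differences of the odd terms */ \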
    t5 = vec_adds(vx0, vx4); \
    t0 = vec_subs(vx0, vx4); \
    t2 = vec_mradds(a0, vx6, vx2); \
    t4 = vec_mradds(a0, vx2, vec_subs(zero, vx6)); \
    t6 = vec_adds(t8, t3); \
    t3 = vec_subs(t8, t3); \
    t8 = vec_subs(t1, t7); \
    t1 = vec_adds(t1, t7); \
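    /* third stage: butterflies on the even part and on the remaining odd terms */ \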
    t7 = vec_adds(t5, t2); \
    t2 = vec_subs(t5, t2); \
    t5 = vec_adds(t0, t4); \
    t0 = vec_subs(t0, t4); \
    t4 = vec_subs(t8, t3); \
    t3 = vec_adds(t8, t3); \
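    /* fourth stage: final butterflies; t3 and t4 are scaled by c4 before the adds */ \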
    vy0 = vec_adds(t7, t1); \
    vy7 = vec_subs(t7, t1); \
    vy1 = vec_mradds(c4, t3, t5); \
    vy6 = vec_mradds(mc4, t3, t5); \
    vy2 = vec_mradds(c4, t4, t0); \
    vy5 = vec_mradds(mc4, t4, t0); \
    vy3 = vec_adds(t2, t6); \
    vy4 = vec_subs(t2, t6)
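/* The full 8x8 IDCT body: prescale and load the eight input rows, run the
 * butterfly pass above on the rows, transpose, run it again on the columns,
 * then round and shift the results back down. */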
#define IDCT \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8; \
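    /* constants[0] packs the six multipliers plus the 32/31 rounding bias */ \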
    vec_s16 c4 = vec_splat(constants[0], 0); \
    vec_s16 a0 = vec_splat(constants[0], 1); \
    vec_s16 a1 = vec_splat(constants[0], 2); \
    vec_s16 a2 = vec_splat(constants[0], 3); \
    vec_s16 mc4 = vec_splat(constants[0], 4); \
    vec_s16 ma2 = vec_splat(constants[0], 5); \
    vec_s16 bias = (vec_s16) vec_splat((vec_s32) constants[0], 3); \
    vec_s16 zero = vec_splat_s16(0); \
    vec_u16 shift = vec_splat_u16(4); \
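    /* shift each input row up by 4 bits and prescale it with the matching constants row */ \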
    vec_s16 vx0 = vec_mradds(vec_sl(block[0], shift), constants[1], zero); \
    vec_s16 vx1 = vec_mradds(vec_sl(block[1], shift), constants[2], zero); \
    vec_s16 vx2 = vec_mradds(vec_sl(block[2], shift), constants[3], zero); \
    vec_s16 vx3 = vec_mradds(vec_sl(block[3], shift), constants[4], zero); \
    vec_s16 vx4 = vec_mradds(vec_sl(block[4], shift), constants[1], zero); \
    vec_s16 vx5 = vec_mradds(vec_sl(block[5], shift), constants[4], zero); \
    vec_s16 vx6 = vec_mradds(vec_sl(block[6], shift), constants[3], zero); \
    vec_s16 vx7 = vec_mradds(vec_sl(block[7], shift), constants[2], zero); \
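    /* first 1-D pass over the rows */ \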
    IDCT_HALF; \
    /* transpose the 8x8 block with three rounds of merges */ \
    vx0 = vec_mergeh(vy0, vy4); \
    vx1 = vec_mergel(vy0, vy4); \
    vx2 = vec_mergeh(vy1, vy5); \
    vx3 = vec_mergel(vy1, vy5); \
    vx4 = vec_mergeh(vy2, vy6); \
    vx5 = vec_mergel(vy2, vy6); \
    vx6 = vec_mergeh(vy3, vy7); \
    vx7 = vec_mergel(vy3, vy7); \
    vy0 = vec_mergeh(vx0, vx4); \
    vy1 = vec_mergel(vx0, vx4); \
    vy2 = vec_mergeh(vx1, vx5); \
    vy3 = vec_mergel(vx1, vx5); \
    vy4 = vec_mergeh(vx2, vx6); \
    vy5 = vec_mergel(vx2, vx6); \
    vy6 = vec_mergeh(vx3, vx7); \
    vy7 = vec_mergel(vx3, vx7); \
    /* last merge round; fold the rounding bias into the first row of the column pass */ \
    vx0 = vec_adds(vec_mergeh(vy0, vy4), bias); \
    vx1 = vec_mergel(vy0, vy4); \
    vx2 = vec_mergeh(vy1, vy5); \
    vx3 = vec_mergel(vy1, vy5); \
    vx4 = vec_mergeh(vy2, vy6); \
    vx5 = vec_mergel(vy2, vy6); \
    vx6 = vec_mergeh(vy3, vy7); \
    vx7 = vec_mergel(vy3, vy7); \
    /* second 1-D pass over the columns */ \
    IDCT_HALF; \
    /* shift the final results back down by 6 bits */ \
    shift = vec_splat_u16(6); \
    vx0 = vec_sra(vy0, shift); \
    vx1 = vec_sra(vy1, shift); \
    vx2 = vec_sra(vy2, shift); \
    vx3 = vec_sra(vy3, shift); \
    vx4 = vec_sra(vy4, shift); \
    vx5 = vec_sra(vy5, shift); \
    vx6 = vec_sra(vy6, shift); \
    vx7 = vec_sra(vy7, shift)
/* row 0: IDCT multipliers and rounding bias; rows 1-4: per-row input prescale factors */
static const vec_s16 constants[5] = {
    { 23170, 13573, 6518, 21895, -23170, -21895, 32, 31 },
    { 16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725 },
    { 22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521 },
    { 21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692 },
    { 19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722 }
};
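/* pack one row of IDCT output to unsigned bytes and store the 8 pixels */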
#define COPY(dest, src) \
    tmp = vec_packsu(src, src); \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest)
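/* build permute vectors that zero-extend 8 destination bytes into 16-bit
 * elements; even rows use perm0, odd rows (offset by stride) use perm1 */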
    p0 = vec_lvsl(0, dest);
    p1 = vec_lvsl(stride, dest);
    p = vec_splat_u8(-1);
    perm0 = vec_mergeh(p, p0);
    perm1 = vec_mergeh(p, p1);
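/* add one row of IDCT output to the existing pixels, clamp, and store 8 bytes */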
#define ADD(dest, src, perm) \
    tmp = vec_ld(0, dest); \
    tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm); \
    tmp3 = vec_adds(tmp2, src); \
    tmp = vec_packsu(tmp3, tmp3); \
    vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
    vec_ste((vec_u32) tmp, 4, (unsigned int *) dest)
    ADD(dest, vx0, perm0);
    dest += stride;
    ADD(dest, vx1, perm1);
    dest += stride;
    ADD(dest, vx2, perm0);
    dest += stride;
    ADD(dest, vx3, perm1);
    dest += stride;
    ADD(dest, vx4, perm0);
    dest += stride;
    ADD(dest, vx5, perm1);
    dest += stride;
    ADD(dest, vx6, perm0);
    dest += stride;
    ADD(dest, vx7, perm1);