fdct_altivec.c
/*
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/common.h"
#include "dsputil_altivec.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1     0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2     0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3     0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4     0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5     0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6     0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7     0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */


#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))


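/*
 * Editorial note (not from the original source): W0-W2 are the multipliers
 * for the even half of each 1-D pass (output coefficients 2 and 6), while
 * W3-WB fold the odd cosines C1, C3, C5 and C7, scaled by sqrt(2), into the
 * vec_madd() coefficients that produce output coefficients 1, 3, 5 and 7 in
 * the butterfly macros below.
 */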
static vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)


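/*
 * Editorial note: the three constant vectors are loaded once into
 * cnsts0-cnsts2; each LD_Wn then splats a single weight across all four
 * float lanes so it can be used directly as the multiplier operand of
 * vec_madd() in the butterfly macros below.
 */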
#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 = b7 + x2; */ \
    b5 = vec_add(b5, x3);           /* b5 = b5 + x3; */ \
    b3 = vec_add(b3, x2);           /* b3 = b3 + x2; */ \
    b1 = vec_add(b1, x3);           /* b1 = b1 + x3; */ \
    /* }}} */

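/*
 * Editorial note: each expansion of FDCTROW/FDCTCOL performs one 8-point
 * 1-D DCT pass on four independent rows/columns at a time, one per float
 * lane, so two expansions cover the whole 8x8 block. Only FDCTCOL is
 * actually invoked below; the row pass is written out inline in
 * ff_fdct_altivec() so that its first stages can stay in 16-bit integer
 * arithmetic.
 */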
#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 += x2; */ \
    b5 = vec_add(b5, x3);           /* b5 += x3; */ \
    b3 = vec_add(b3, x2);           /* b3 += x2; */ \
    b1 = vec_add(b1, x3);           /* b1 += x3; */ \
    /* }}} */



/* two dimensional discrete cosine transform */

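/*
 * Editorial note: the overall pipeline is
 *   16-bit 8x8 transpose -> "fdct rows" pass (integer butterflies, then
 *   conversion to float) -> float 8x8 transpose -> FDCTCOL pass -> round,
 *   convert back to int16 and store.
 *
 * Illustrative usage (assumed, not part of the original file): the transform
 * works in place on a row-major 8x8 block of int16_t coefficients, which
 * must be 16-byte aligned because vec_ld()/vec_st() are applied to it
 * directly, e.g.
 *
 *     DECLARE_ALIGNED(16, int16_t, block)[64];
 *     // ... fill block with sample or residual values ...
 *     ff_fdct_altivec(block);
 */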
void ff_fdct_altivec(int16_t *block)
{
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
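    /* Editorial note: vec_splat_u32(-1) fills every 32-bit lane with
     * 0xFFFFFFFF; vec_sl() then shifts each lane left by 31 (only the low
     * five bits of the shift count are used), leaving 0x80000000, the bit
     * pattern of -0.0f, which serves as the additive identity for the
     * vec_madd() calls below. */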
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

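    /* Editorial note: the transpose is done entirely in registers with three
     * rounds of vec_mergeh()/vec_mergel() interleaves of the 16-bit
     * elements, pairing row i with row i+4 (loaded from byte offset 16*4) in
     * the first round. */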
    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float. The following code section takes advantage
 * of this.
 */
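/* (Editorial note: presumably this works because the first butterfly stage
 * needs only additions and subtractions, which stay exact in 16-bit
 * arithmetic for the usual input range, so the more expensive unpack and
 * int->float conversion can be postponed until multiplications are needed.)
 */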
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);
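    /* Editorial note: CTF0(n) widens vector b##n##0 from eight int16 values
     * to two vectors of four int32 values (vec_unpackh/vec_unpackl
     * sign-extend) and converts both to float with vec_ctf(), with no
     * fixed-point scaling (second argument 0). */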

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);
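    /* Editorial note: CTS(n) rounds both float vectors to the nearest
     * integer, converts them to int32 with vec_cts(), packs the eight
     * results back into one vector of int16 and stores it over the input
     * block, so the transform runs fully in place. */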

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */
}

/* vim:set foldmethod=marker foldlevel=0: */