FFmpeg
gmc_altivec.c
/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h" /* vec_u16 */
#include "libavutil/ppc/util_altivec.h"  /* vcprm() */
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"

/*
 * AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
 * to preserve proper dst alignment.
 */
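
/*
 * For reference, a scalar sketch of the computation vectorized below
 * (a hypothetical helper, not part of this file): every output pixel is
 * a bilinear blend of four neighboring source pixels, using 8.8
 * fixed-point weights that sum to 256, hence the final >> 8.
 */
#if 0
static void gmc1_scalar_sketch(uint8_t *dst, uint8_t *src, int stride,
                               int h, int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (     x16) * (16 - y16);
    const int C = (16 - x16) * (     y16);
    const int D = (     x16) * (     y16);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        }
        dst += stride;
        src += stride;
    }
}
#endif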
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                     int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
    {
        (16 - x16) * (16 - y16), /* A */
        (     x16) * (16 - y16), /* B */
        (16 - x16) * (     y16), /* C */
        (     x16) * (     y16), /* D */
        0, 0, 0, 0               /* padding */
    };
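    /* A, B, C, D are the 8.8 fixed-point bilinear weights of the four
     * neighboring pixels; whatever x16 and y16 are, they sum to 16 * 16 = 256. */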
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

    tempA = vec_ld(0, (const unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

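    // Replicate the 16-bit rounder into all eight lanes.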
    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);

    // We'll be able to pick up our 9 char elements at src from those
    // 32 bytes. We load the first batch here, as inside the loop we can
    // reuse 'src + stride' from one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // If src & 0xF == 0xF, then (src + 1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
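    // vec_mergeh() with vczero interleaves a zero byte before each of
    // the first 8 source bytes, zero-extending them to unsigned shorts
    // for the 16-bit multiply-adds below.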

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // We'll be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting 2 vectors srcvC
        // and srcvD as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // If src & 0xF == 0xF, then (src + 1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // Those four instructions replace 32 int muls & 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
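        // Each 16-bit lane of tempD now holds
        // A*srcA + B*srcB + C*srcC + D*srcD + rounder.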

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);
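        // The four weights sum to 256, so >> 8 renormalizes the blend
        // back to the 0..255 pixel range.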

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

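        // vec_st() always stores 16 aligned bytes, but only 8 result
        // bytes were produced, so merge them into the proper half of
        // the destination vector loaded above (read-modify-write).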
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}