gmc_altivec.c
/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h" /* vec_u16 */
#include "libavutil/ppc/util_altivec.h"  /* vcprm() */
#include "dsputil_altivec.h"

/* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
 * to preserve proper dst alignment. */
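/* For reference, the scalar operation being vectorized is the bilinear
 * blend sketched below (an illustrative sketch, not FFmpeg's exact C
 * fallback). The four weights are 1/16th-pel bilinear coefficients that
 * always sum to 256, which is why the accumulated result is rescaled with
 * a single >> 8:
 *
 *     const int A = (16 - x16) * (16 - y16);
 *     const int B = (     x16) * (16 - y16);
 *     const int C = (16 - x16) * (     y16);
 *     const int D = (     x16) * (     y16);
 *
 *     for (i = 0; i < h; i++) {
 *         for (j = 0; j < 8; j++)
 *             dst[j] = (A * src[j]          + B * src[j + 1] +
 *                       C * src[j + stride] + D * src[j + stride + 1] +
 *                       rounder) >> 8;
 *         dst += stride;
 *         src += stride;
 *     }
 *
 * The AltiVec version below computes all eight pixels of a row at once. */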
void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                     int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
        (x16)      * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
        (x16)      * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
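    /* The four zeros pad ABCD out to a full 16-byte vector so the vec_ld()
     * below reads only initialized, 16-byte-aligned storage. */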
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
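    /* Splat each 16-bit weight across all eight lanes so that a single
     * vec_mladd() applies the same coefficient to eight pixels at once. */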
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    /* We'll be able to pick up our 9 char elements at src from those
     * 32 bytes. We load the first batch here; inside the loop we can
     * reuse 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));
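    /* vec_ld() ignores the low four address bits, so the two aligned loads
     * plus a vec_perm() with the shift mask from vec_lvsl() form the usual
     * AltiVec idiom for reading a misaligned row. */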

    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
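    /* Interleaving with the zero vector zero-extends the 8 low bytes of
     * each row into eight 16-bit lanes, ready for vec_mladd(). */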

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long) dst & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We'll be able to pick up our 9 char elements at src + stride from
         * those 32 bytes, then reuse the resulting 2 vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
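        /* Each vec_mladd() does eight 16-bit multiply-accumulates, and each
         * result feeds the next call as its addend, so each lane of tempD
         * ends up holding A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder
         * for its pixel. */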
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

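        /* The bottom row of this iteration becomes the top row of the next,
         * saving the redundant loads of that row. */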
        srcvA = srcvC;
        srcvB = srcvD;

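        /* The weights sum to 256, so >> 8 rescales the accumulator to pixel
         * range; vec_pack() then narrows the shorts back to bytes. Only the
         * low 8 result bytes are meaningful, the rest are packed zeros. */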
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

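        /* vec_st() can only store a full aligned 16-byte vector, so do a
         * read-modify-write: merge the 8 result bytes into whichever half
         * of the loaded dst vector the (8-byte-aligned) dst points to. */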
        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
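
/* This routine is installed as the gmc1 hook of the DSP context by the
 * AltiVec init code (roughly: c->gmc1 = ff_gmc1_altivec;), replacing the
 * scalar implementation when AltiVec is available at runtime. */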