FFmpeg
dsputil_avg_template.c
1 /*
2  * DSP utils: average functions are compiled twice for 3dnow/mmxext
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer
5  *
6  * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
7  * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
8  * and improved by Zdenek Kabelac <kabi@users.sf.net>
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 /* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
28  clobber bug; this way it works with 2.95.2 and also with -fPIC
29  */
30 static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
31 {
32  __asm__ volatile(
33  "lea (%3, %3), %%"REG_a" \n\t"
34  "1: \n\t"
35  "movq (%1), %%mm0 \n\t"
36  "movq (%1, %3), %%mm1 \n\t"
37  PAVGB" 1(%1), %%mm0 \n\t"
38  PAVGB" 1(%1, %3), %%mm1 \n\t"
39  "movq %%mm0, (%2) \n\t"
40  "movq %%mm1, (%2, %3) \n\t"
41  "add %%"REG_a", %1 \n\t"
42  "add %%"REG_a", %2 \n\t"
43  "movq (%1), %%mm0 \n\t"
44  "movq (%1, %3), %%mm1 \n\t"
45  PAVGB" 1(%1), %%mm0 \n\t"
46  PAVGB" 1(%1, %3), %%mm1 \n\t"
47  "add %%"REG_a", %1 \n\t"
48  "movq %%mm0, (%2) \n\t"
49  "movq %%mm1, (%2, %3) \n\t"
50  "add %%"REG_a", %2 \n\t"
51  "subl $4, %0 \n\t"
52  "jnz 1b \n\t"
53  :"+g"(h), "+S"(pixels), "+D"(block)
54  :"r" ((x86_reg)line_size)
55  :"%"REG_a, "memory");
56 }
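/* Reference sketch (plain C, for illustration only, not compiled): assuming
 * PAVGB / PAVGUSB computes the rounding byte average (a + b + 1) >> 1, the
 * loop above is equivalent to
 *
 *     for (int i = 0; i < h; i++) {
 *         for (int j = 0; j < 8; j++)
 *             block[j] = (pixels[j] + pixels[j + 1] + 1) >> 1;
 *         pixels += line_size;
 *         block  += line_size;
 *     }
 *
 * i.e. a rounded horizontal half-pel average, unrolled four rows per
 * iteration in the asm.
 */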
57 
58 #ifndef SKIP_FOR_3DNOW
59 static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
60 {
61  __asm__ volatile(
62  "testl $1, %0 \n\t"
63  " jz 1f \n\t"
64  "movq (%1), %%mm0 \n\t"
65  "movq (%2), %%mm1 \n\t"
66  "add %4, %1 \n\t"
67  "add $8, %2 \n\t"
68  PAVGB" %%mm1, %%mm0 \n\t"
69  "movq %%mm0, (%3) \n\t"
70  "add %5, %3 \n\t"
71  "decl %0 \n\t"
72  "1: \n\t"
73  "movq (%1), %%mm0 \n\t"
74  "add %4, %1 \n\t"
75  "movq (%1), %%mm1 \n\t"
76  "add %4, %1 \n\t"
77  PAVGB" (%2), %%mm0 \n\t"
78  PAVGB" 8(%2), %%mm1 \n\t"
79  "movq %%mm0, (%3) \n\t"
80  "add %5, %3 \n\t"
81  "movq %%mm1, (%3) \n\t"
82  "add %5, %3 \n\t"
83  "movq (%1), %%mm0 \n\t"
84  "add %4, %1 \n\t"
85  "movq (%1), %%mm1 \n\t"
86  "add %4, %1 \n\t"
87  PAVGB" 16(%2), %%mm0 \n\t"
88  PAVGB" 24(%2), %%mm1 \n\t"
89  "movq %%mm0, (%3) \n\t"
90  "add %5, %3 \n\t"
91  "movq %%mm1, (%3) \n\t"
92  "add %5, %3 \n\t"
93  "add $32, %2 \n\t"
94  "subl $4, %0 \n\t"
95  "jnz 1b \n\t"
96 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
97  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
98 #else
99  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
100 #endif
101  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
102  :"memory");
103 // The following generic constraints would be preferable, but gcc does not handle them well ...
104 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
105  :"r"(src1Stride), "r"(dstStride)
106  :"memory");*/
107 }
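/* Reference sketch (plain C, for illustration only): src2 is read as a packed
 * buffer with a fixed stride of 8 bytes per row, so the loop above amounts to
 *
 *     for (int i = 0; i < h; i++) {
 *         for (int j = 0; j < 8; j++)
 *             dst[j] = (src1[j] + src2[j] + 1) >> 1;
 *         src1 += src1Stride;
 *         src2 += 8;
 *         dst  += dstStride;
 *     }
 *
 * The initial "testl $1, %0" handles an odd h by processing one row before
 * entering the four-rows-per-iteration main loop.
 */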
108 
109 static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
110 {
111  __asm__ volatile(
112  "pcmpeqb %%mm6, %%mm6 \n\t"
113  "testl $1, %0 \n\t"
114  " jz 1f \n\t"
115  "movq (%1), %%mm0 \n\t"
116  "movq (%2), %%mm1 \n\t"
117  "add %4, %1 \n\t"
118  "add $8, %2 \n\t"
119  "pxor %%mm6, %%mm0 \n\t"
120  "pxor %%mm6, %%mm1 \n\t"
121  PAVGB" %%mm1, %%mm0 \n\t"
122  "pxor %%mm6, %%mm0 \n\t"
123  "movq %%mm0, (%3) \n\t"
124  "add %5, %3 \n\t"
125  "decl %0 \n\t"
126  "1: \n\t"
127  "movq (%1), %%mm0 \n\t"
128  "add %4, %1 \n\t"
129  "movq (%1), %%mm1 \n\t"
130  "add %4, %1 \n\t"
131  "movq (%2), %%mm2 \n\t"
132  "movq 8(%2), %%mm3 \n\t"
133  "pxor %%mm6, %%mm0 \n\t"
134  "pxor %%mm6, %%mm1 \n\t"
135  "pxor %%mm6, %%mm2 \n\t"
136  "pxor %%mm6, %%mm3 \n\t"
137  PAVGB" %%mm2, %%mm0 \n\t"
138  PAVGB" %%mm3, %%mm1 \n\t"
139  "pxor %%mm6, %%mm0 \n\t"
140  "pxor %%mm6, %%mm1 \n\t"
141  "movq %%mm0, (%3) \n\t"
142  "add %5, %3 \n\t"
143  "movq %%mm1, (%3) \n\t"
144  "add %5, %3 \n\t"
145  "movq (%1), %%mm0 \n\t"
146  "add %4, %1 \n\t"
147  "movq (%1), %%mm1 \n\t"
148  "add %4, %1 \n\t"
149  "movq 16(%2), %%mm2 \n\t"
150  "movq 24(%2), %%mm3 \n\t"
151  "pxor %%mm6, %%mm0 \n\t"
152  "pxor %%mm6, %%mm1 \n\t"
153  "pxor %%mm6, %%mm2 \n\t"
154  "pxor %%mm6, %%mm3 \n\t"
155  PAVGB" %%mm2, %%mm0 \n\t"
156  PAVGB" %%mm3, %%mm1 \n\t"
157  "pxor %%mm6, %%mm0 \n\t"
158  "pxor %%mm6, %%mm1 \n\t"
159  "movq %%mm0, (%3) \n\t"
160  "add %5, %3 \n\t"
161  "movq %%mm1, (%3) \n\t"
162  "add %5, %3 \n\t"
163  "add $32, %2 \n\t"
164  "subl $4, %0 \n\t"
165  "jnz 1b \n\t"
166 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
167  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
168 #else
169  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
170 #endif
171  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
172  :"memory");
173 // The following generic constraints would be preferable, but gcc does not handle them well ...
174 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
175  :"r"(src1Stride), "r"(dstStride)
176  :"memory");*/
177 }
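/* The pxor pairs with %%mm6 (all ones, from pcmpeqb) around each PAVGB turn
 * the rounding average into the truncating ("no rounding") average via the
 * complement identity 255 - (((255 - a) + (255 - b) + 1) >> 1) == (a + b) >> 1.
 * Hypothetical scalar form for one byte (illustration only):
 *
 *     static uint8_t no_rnd_avg8(uint8_t a, uint8_t b)
 *     {
 *         // complement, rounding average, complement back
 *         return 255 - (((255 - a) + (255 - b) + 1) >> 1);   // == (a + b) >> 1
 *     }
 */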
178 
179 static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
180 {
181  __asm__ volatile(
182  "testl $1, %0 \n\t"
183  " jz 1f \n\t"
184  "movq (%1), %%mm0 \n\t"
185  "movq (%2), %%mm1 \n\t"
186  "add %4, %1 \n\t"
187  "add $8, %2 \n\t"
188  PAVGB" %%mm1, %%mm0 \n\t"
189  PAVGB" (%3), %%mm0 \n\t"
190  "movq %%mm0, (%3) \n\t"
191  "add %5, %3 \n\t"
192  "decl %0 \n\t"
193  "1: \n\t"
194  "movq (%1), %%mm0 \n\t"
195  "add %4, %1 \n\t"
196  "movq (%1), %%mm1 \n\t"
197  "add %4, %1 \n\t"
198  PAVGB" (%2), %%mm0 \n\t"
199  PAVGB" 8(%2), %%mm1 \n\t"
200  PAVGB" (%3), %%mm0 \n\t"
201  "movq %%mm0, (%3) \n\t"
202  "add %5, %3 \n\t"
203  PAVGB" (%3), %%mm1 \n\t"
204  "movq %%mm1, (%3) \n\t"
205  "add %5, %3 \n\t"
206  "movq (%1), %%mm0 \n\t"
207  "add %4, %1 \n\t"
208  "movq (%1), %%mm1 \n\t"
209  "add %4, %1 \n\t"
210  PAVGB" 16(%2), %%mm0 \n\t"
211  PAVGB" 24(%2), %%mm1 \n\t"
212  PAVGB" (%3), %%mm0 \n\t"
213  "movq %%mm0, (%3) \n\t"
214  "add %5, %3 \n\t"
215  PAVGB" (%3), %%mm1 \n\t"
216  "movq %%mm1, (%3) \n\t"
217  "add %5, %3 \n\t"
218  "add $32, %2 \n\t"
219  "subl $4, %0 \n\t"
220  "jnz 1b \n\t"
221 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
222  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
223 #else
224  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
225 #endif
226  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
227  :"memory");
228 // The following generic constraints would be preferable, but gcc does not handle them well ...
229 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
230  :"r"(src1Stride), "r"(dstStride)
231  :"memory");*/
232 }
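/* Same layout as put_pixels8_l2 above, but the result is additionally blended
 * with the destination; per byte (illustration only, with PAVGB taken as
 * (a + b + 1) >> 1):
 *
 *     dst[j] = ((((src1[j] + src2[j] + 1) >> 1) + dst[j] + 1) >> 1);
 *
 * The two rounding averages are nested, so src1 and src2 each effectively get
 * weight 1/4 and the existing destination weight 1/2.
 */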
233 #endif /* SKIP_FOR_3DNOW */
234 
235 static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
236 {
237  __asm__ volatile(
238  "lea (%3, %3), %%"REG_a" \n\t"
239  "1: \n\t"
240  "movq (%1), %%mm0 \n\t"
241  "movq (%1, %3), %%mm1 \n\t"
242  "movq 8(%1), %%mm2 \n\t"
243  "movq 8(%1, %3), %%mm3 \n\t"
244  PAVGB" 1(%1), %%mm0 \n\t"
245  PAVGB" 1(%1, %3), %%mm1 \n\t"
246  PAVGB" 9(%1), %%mm2 \n\t"
247  PAVGB" 9(%1, %3), %%mm3 \n\t"
248  "movq %%mm0, (%2) \n\t"
249  "movq %%mm1, (%2, %3) \n\t"
250  "movq %%mm2, 8(%2) \n\t"
251  "movq %%mm3, 8(%2, %3) \n\t"
252  "add %%"REG_a", %1 \n\t"
253  "add %%"REG_a", %2 \n\t"
254  "movq (%1), %%mm0 \n\t"
255  "movq (%1, %3), %%mm1 \n\t"
256  "movq 8(%1), %%mm2 \n\t"
257  "movq 8(%1, %3), %%mm3 \n\t"
258  PAVGB" 1(%1), %%mm0 \n\t"
259  PAVGB" 1(%1, %3), %%mm1 \n\t"
260  PAVGB" 9(%1), %%mm2 \n\t"
261  PAVGB" 9(%1, %3), %%mm3 \n\t"
262  "add %%"REG_a", %1 \n\t"
263  "movq %%mm0, (%2) \n\t"
264  "movq %%mm1, (%2, %3) \n\t"
265  "movq %%mm2, 8(%2) \n\t"
266  "movq %%mm3, 8(%2, %3) \n\t"
267  "add %%"REG_a", %2 \n\t"
268  "subl $4, %0 \n\t"
269  "jnz 1b \n\t"
270  :"+g"(h), "+S"(pixels), "+D"(block)
271  :"r" ((x86_reg)line_size)
272  :"%"REG_a, "memory");
273 }
274 
275 #ifndef SKIP_FOR_3DNOW
276 static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
277 {
278  __asm__ volatile(
279  "testl $1, %0 \n\t"
280  " jz 1f \n\t"
281  "movq (%1), %%mm0 \n\t"
282  "movq 8(%1), %%mm1 \n\t"
283  PAVGB" (%2), %%mm0 \n\t"
284  PAVGB" 8(%2), %%mm1 \n\t"
285  "add %4, %1 \n\t"
286  "add $16, %2 \n\t"
287  "movq %%mm0, (%3) \n\t"
288  "movq %%mm1, 8(%3) \n\t"
289  "add %5, %3 \n\t"
290  "decl %0 \n\t"
291  "1: \n\t"
292  "movq (%1), %%mm0 \n\t"
293  "movq 8(%1), %%mm1 \n\t"
294  "add %4, %1 \n\t"
295  PAVGB" (%2), %%mm0 \n\t"
296  PAVGB" 8(%2), %%mm1 \n\t"
297  "movq %%mm0, (%3) \n\t"
298  "movq %%mm1, 8(%3) \n\t"
299  "add %5, %3 \n\t"
300  "movq (%1), %%mm0 \n\t"
301  "movq 8(%1), %%mm1 \n\t"
302  "add %4, %1 \n\t"
303  PAVGB" 16(%2), %%mm0 \n\t"
304  PAVGB" 24(%2), %%mm1 \n\t"
305  "movq %%mm0, (%3) \n\t"
306  "movq %%mm1, 8(%3) \n\t"
307  "add %5, %3 \n\t"
308  "add $32, %2 \n\t"
309  "subl $2, %0 \n\t"
310  "jnz 1b \n\t"
311 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
312  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
313 #else
314  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
315 #endif
316  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
317  :"memory");
318 // The following generic constraints would be preferable, but gcc does not handle them well ...
319 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
320  :"r"(src1Stride), "r"(dstStride)
321  :"memory");*/
322 }
323 
324 static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
325 {
326  __asm__ volatile(
327  "testl $1, %0 \n\t"
328  " jz 1f \n\t"
329  "movq (%1), %%mm0 \n\t"
330  "movq 8(%1), %%mm1 \n\t"
331  PAVGB" (%2), %%mm0 \n\t"
332  PAVGB" 8(%2), %%mm1 \n\t"
333  "add %4, %1 \n\t"
334  "add $16, %2 \n\t"
335  PAVGB" (%3), %%mm0 \n\t"
336  PAVGB" 8(%3), %%mm1 \n\t"
337  "movq %%mm0, (%3) \n\t"
338  "movq %%mm1, 8(%3) \n\t"
339  "add %5, %3 \n\t"
340  "decl %0 \n\t"
341  "1: \n\t"
342  "movq (%1), %%mm0 \n\t"
343  "movq 8(%1), %%mm1 \n\t"
344  "add %4, %1 \n\t"
345  PAVGB" (%2), %%mm0 \n\t"
346  PAVGB" 8(%2), %%mm1 \n\t"
347  PAVGB" (%3), %%mm0 \n\t"
348  PAVGB" 8(%3), %%mm1 \n\t"
349  "movq %%mm0, (%3) \n\t"
350  "movq %%mm1, 8(%3) \n\t"
351  "add %5, %3 \n\t"
352  "movq (%1), %%mm0 \n\t"
353  "movq 8(%1), %%mm1 \n\t"
354  "add %4, %1 \n\t"
355  PAVGB" 16(%2), %%mm0 \n\t"
356  PAVGB" 24(%2), %%mm1 \n\t"
357  PAVGB" (%3), %%mm0 \n\t"
358  PAVGB" 8(%3), %%mm1 \n\t"
359  "movq %%mm0, (%3) \n\t"
360  "movq %%mm1, 8(%3) \n\t"
361  "add %5, %3 \n\t"
362  "add $32, %2 \n\t"
363  "subl $2, %0 \n\t"
364  "jnz 1b \n\t"
365 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
366  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
367 #else
368  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
369 #endif
370  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
371  :"memory");
372 // The following generic constraints would be preferable, but gcc does not handle them well ...
373 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
374  :"r"(src1Stride), "r"(dstStride)
375  :"memory");*/
376 }
377 
378 static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
379 {
380  __asm__ volatile(
381  "pcmpeqb %%mm6, %%mm6 \n\t"
382  "testl $1, %0 \n\t"
383  " jz 1f \n\t"
384  "movq (%1), %%mm0 \n\t"
385  "movq 8(%1), %%mm1 \n\t"
386  "movq (%2), %%mm2 \n\t"
387  "movq 8(%2), %%mm3 \n\t"
388  "pxor %%mm6, %%mm0 \n\t"
389  "pxor %%mm6, %%mm1 \n\t"
390  "pxor %%mm6, %%mm2 \n\t"
391  "pxor %%mm6, %%mm3 \n\t"
392  PAVGB" %%mm2, %%mm0 \n\t"
393  PAVGB" %%mm3, %%mm1 \n\t"
394  "pxor %%mm6, %%mm0 \n\t"
395  "pxor %%mm6, %%mm1 \n\t"
396  "add %4, %1 \n\t"
397  "add $16, %2 \n\t"
398  "movq %%mm0, (%3) \n\t"
399  "movq %%mm1, 8(%3) \n\t"
400  "add %5, %3 \n\t"
401  "decl %0 \n\t"
402  "1: \n\t"
403  "movq (%1), %%mm0 \n\t"
404  "movq 8(%1), %%mm1 \n\t"
405  "add %4, %1 \n\t"
406  "movq (%2), %%mm2 \n\t"
407  "movq 8(%2), %%mm3 \n\t"
408  "pxor %%mm6, %%mm0 \n\t"
409  "pxor %%mm6, %%mm1 \n\t"
410  "pxor %%mm6, %%mm2 \n\t"
411  "pxor %%mm6, %%mm3 \n\t"
412  PAVGB" %%mm2, %%mm0 \n\t"
413  PAVGB" %%mm3, %%mm1 \n\t"
414  "pxor %%mm6, %%mm0 \n\t"
415  "pxor %%mm6, %%mm1 \n\t"
416  "movq %%mm0, (%3) \n\t"
417  "movq %%mm1, 8(%3) \n\t"
418  "add %5, %3 \n\t"
419  "movq (%1), %%mm0 \n\t"
420  "movq 8(%1), %%mm1 \n\t"
421  "add %4, %1 \n\t"
422  "movq 16(%2), %%mm2 \n\t"
423  "movq 24(%2), %%mm3 \n\t"
424  "pxor %%mm6, %%mm0 \n\t"
425  "pxor %%mm6, %%mm1 \n\t"
426  "pxor %%mm6, %%mm2 \n\t"
427  "pxor %%mm6, %%mm3 \n\t"
428  PAVGB" %%mm2, %%mm0 \n\t"
429  PAVGB" %%mm3, %%mm1 \n\t"
430  "pxor %%mm6, %%mm0 \n\t"
431  "pxor %%mm6, %%mm1 \n\t"
432  "movq %%mm0, (%3) \n\t"
433  "movq %%mm1, 8(%3) \n\t"
434  "add %5, %3 \n\t"
435  "add $32, %2 \n\t"
436  "subl $2, %0 \n\t"
437  "jnz 1b \n\t"
438 #if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used
439  :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst)
440 #else
441  :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst)
442 #endif
443  :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride)
444  :"memory");
445 // The following generic constraints would be preferable, but gcc does not handle them well ...
446 /* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst)
447  :"r"(src1Stride), "r"(dstStride)
448  :"memory");*/
449 }
450 #endif /* SKIP_FOR_3DNOW */
451 
452 /* GL: this function rounds incorrectly when the saturating psubusb clamps (i.e. for source bytes equal to 0) */
453 static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
454 {
455  MOVQ_BONE(mm6);
456  __asm__ volatile(
457  "lea (%3, %3), %%"REG_a" \n\t"
458  "1: \n\t"
459  "movq (%1), %%mm0 \n\t"
460  "movq (%1, %3), %%mm2 \n\t"
461  "movq 1(%1), %%mm1 \n\t"
462  "movq 1(%1, %3), %%mm3 \n\t"
463  "add %%"REG_a", %1 \n\t"
464  "psubusb %%mm6, %%mm0 \n\t"
465  "psubusb %%mm6, %%mm2 \n\t"
466  PAVGB" %%mm1, %%mm0 \n\t"
467  PAVGB" %%mm3, %%mm2 \n\t"
468  "movq %%mm0, (%2) \n\t"
469  "movq %%mm2, (%2, %3) \n\t"
470  "movq (%1), %%mm0 \n\t"
471  "movq 1(%1), %%mm1 \n\t"
472  "movq (%1, %3), %%mm2 \n\t"
473  "movq 1(%1, %3), %%mm3 \n\t"
474  "add %%"REG_a", %2 \n\t"
475  "add %%"REG_a", %1 \n\t"
476  "psubusb %%mm6, %%mm0 \n\t"
477  "psubusb %%mm6, %%mm2 \n\t"
478  PAVGB" %%mm1, %%mm0 \n\t"
479  PAVGB" %%mm3, %%mm2 \n\t"
480  "movq %%mm0, (%2) \n\t"
481  "movq %%mm2, (%2, %3) \n\t"
482  "add %%"REG_a", %2 \n\t"
483  "subl $4, %0 \n\t"
484  "jnz 1b \n\t"
485  :"+g"(h), "+S"(pixels), "+D"(block)
486  :"r" ((x86_reg)line_size)
487  :"%"REG_a, "memory");
488 }
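/* Here the no-rounding average is built differently: MOVQ_BONE fills %%mm6
 * with 0x01 in every byte and psubusb subtracts it with unsigned saturation
 * from one operand, so the rounding PAVGB becomes truncating:
 *
 *     ((a - 1) + b + 1) >> 1 == (a + b) >> 1       // whenever a > 0
 *
 * When a == 0 the subtraction clamps at 0 and the result can be one too
 * large, which is the incorrect rounding noted above; the _exact variant
 * below uses the pxor/complement trick instead and does not have this flaw.
 */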
489 
490 static void DEF(put_no_rnd_pixels8_x2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
491 {
492  __asm__ volatile (
493  "pcmpeqb %%mm6, %%mm6 \n\t"
494  "1: \n\t"
495  "movq (%1), %%mm0 \n\t"
496  "movq (%1, %3), %%mm2 \n\t"
497  "movq 1(%1), %%mm1 \n\t"
498  "movq 1(%1, %3), %%mm3 \n\t"
499  "pxor %%mm6, %%mm0 \n\t"
500  "pxor %%mm6, %%mm2 \n\t"
501  "pxor %%mm6, %%mm1 \n\t"
502  "pxor %%mm6, %%mm3 \n\t"
503  PAVGB" %%mm1, %%mm0 \n\t"
504  PAVGB" %%mm3, %%mm2 \n\t"
505  "pxor %%mm6, %%mm0 \n\t"
506  "pxor %%mm6, %%mm2 \n\t"
507  "movq %%mm0, (%2) \n\t"
508  "movq %%mm2, (%2, %3) \n\t"
509  "movq (%1, %3,2), %%mm0 \n\t"
510  "movq 1(%1, %3,2), %%mm1 \n\t"
511  "movq (%1, %4), %%mm2 \n\t"
512  "movq 1(%1, %4), %%mm3 \n\t"
513  "pxor %%mm6, %%mm0 \n\t"
514  "pxor %%mm6, %%mm1 \n\t"
515  "pxor %%mm6, %%mm2 \n\t"
516  "pxor %%mm6, %%mm3 \n\t"
517  PAVGB" %%mm1, %%mm0 \n\t"
518  PAVGB" %%mm3, %%mm2 \n\t"
519  "pxor %%mm6, %%mm0 \n\t"
520  "pxor %%mm6, %%mm2 \n\t"
521  "movq %%mm0, (%2, %3,2) \n\t"
522  "movq %%mm2, (%2, %4) \n\t"
523  "lea (%1, %3,4), %1 \n\t"
524  "lea (%2, %3,4), %2 \n\t"
525  "subl $4, %0 \n\t"
526  "jg 1b \n\t"
527  : "+g"(h), "+r"(pixels), "+r"(block)
528  : "r" ((x86_reg)line_size), "r"((x86_reg)3*line_size)
529  : "memory"
530  );
531 }
532 
533 static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
534 {
535  __asm__ volatile(
536  "lea (%3, %3), %%"REG_a" \n\t"
537  "movq (%1), %%mm0 \n\t"
538  "sub %3, %2 \n\t"
539  "1: \n\t"
540  "movq (%1, %3), %%mm1 \n\t"
541  "movq (%1, %%"REG_a"), %%mm2 \n\t"
542  "add %%"REG_a", %1 \n\t"
543  PAVGB" %%mm1, %%mm0 \n\t"
544  PAVGB" %%mm2, %%mm1 \n\t"
545  "movq %%mm0, (%2, %3) \n\t"
546  "movq %%mm1, (%2, %%"REG_a") \n\t"
547  "movq (%1, %3), %%mm1 \n\t"
548  "movq (%1, %%"REG_a"), %%mm0 \n\t"
549  "add %%"REG_a", %2 \n\t"
550  "add %%"REG_a", %1 \n\t"
551  PAVGB" %%mm1, %%mm2 \n\t"
552  PAVGB" %%mm0, %%mm1 \n\t"
553  "movq %%mm2, (%2, %3) \n\t"
554  "movq %%mm1, (%2, %%"REG_a") \n\t"
555  "add %%"REG_a", %2 \n\t"
556  "subl $4, %0 \n\t"
557  "jnz 1b \n\t"
558  :"+g"(h), "+S"(pixels), "+D" (block)
559  :"r" ((x86_reg)line_size)
560  :"%"REG_a, "memory");
561 }
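/* Reference sketch (plain C, for illustration only) of the vertical half-pel
 * average above; %%mm0 carries the previous source row across iterations, so
 * each source row is loaded from memory only once:
 *
 *     for (int i = 0; i < h; i++) {
 *         for (int j = 0; j < 8; j++)
 *             block[j] = (pixels[j] + pixels[j + line_size] + 1) >> 1;
 *         pixels += line_size;
 *         block  += line_size;
 *     }
 */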
562 
563 /* GL: this function rounds incorrectly when the saturating psubusb clamps (i.e. for source bytes equal to 0) */
564 static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
565 {
566  MOVQ_BONE(mm6);
567  __asm__ volatile(
568  "lea (%3, %3), %%"REG_a" \n\t"
569  "movq (%1), %%mm0 \n\t"
570  "sub %3, %2 \n\t"
571  "1: \n\t"
572  "movq (%1, %3), %%mm1 \n\t"
573  "movq (%1, %%"REG_a"), %%mm2 \n\t"
574  "add %%"REG_a", %1 \n\t"
575  "psubusb %%mm6, %%mm1 \n\t"
576  PAVGB" %%mm1, %%mm0 \n\t"
577  PAVGB" %%mm2, %%mm1 \n\t"
578  "movq %%mm0, (%2, %3) \n\t"
579  "movq %%mm1, (%2, %%"REG_a") \n\t"
580  "movq (%1, %3), %%mm1 \n\t"
581  "movq (%1, %%"REG_a"), %%mm0 \n\t"
582  "add %%"REG_a", %2 \n\t"
583  "add %%"REG_a", %1 \n\t"
584  "psubusb %%mm6, %%mm1 \n\t"
585  PAVGB" %%mm1, %%mm2 \n\t"
586  PAVGB" %%mm0, %%mm1 \n\t"
587  "movq %%mm2, (%2, %3) \n\t"
588  "movq %%mm1, (%2, %%"REG_a") \n\t"
589  "add %%"REG_a", %2 \n\t"
590  "subl $4, %0 \n\t"
591  "jnz 1b \n\t"
592  :"+g"(h), "+S"(pixels), "+D" (block)
593  :"r" ((x86_reg)line_size)
594  :"%"REG_a, "memory");
595 }
596 
597 static void DEF(put_no_rnd_pixels8_y2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
598 {
599  __asm__ volatile (
600  "movq (%1), %%mm0 \n\t"
601  "pcmpeqb %%mm6, %%mm6 \n\t"
602  "add %3, %1 \n\t"
603  "pxor %%mm6, %%mm0 \n\t"
604  "1: \n\t"
605  "movq (%1), %%mm1 \n\t"
606  "movq (%1, %3), %%mm2 \n\t"
607  "pxor %%mm6, %%mm1 \n\t"
608  "pxor %%mm6, %%mm2 \n\t"
609  PAVGB" %%mm1, %%mm0 \n\t"
610  PAVGB" %%mm2, %%mm1 \n\t"
611  "pxor %%mm6, %%mm0 \n\t"
612  "pxor %%mm6, %%mm1 \n\t"
613  "movq %%mm0, (%2) \n\t"
614  "movq %%mm1, (%2, %3) \n\t"
615  "movq (%1, %3,2), %%mm1 \n\t"
616  "movq (%1, %4), %%mm0 \n\t"
617  "pxor %%mm6, %%mm1 \n\t"
618  "pxor %%mm6, %%mm0 \n\t"
619  PAVGB" %%mm1, %%mm2 \n\t"
620  PAVGB" %%mm0, %%mm1 \n\t"
621  "pxor %%mm6, %%mm2 \n\t"
622  "pxor %%mm6, %%mm1 \n\t"
623  "movq %%mm2, (%2, %3,2) \n\t"
624  "movq %%mm1, (%2, %4) \n\t"
625  "lea (%1, %3,4), %1 \n\t"
626  "lea (%2, %3,4), %2 \n\t"
627  "subl $4, %0 \n\t"
628  "jg 1b \n\t"
629  :"+g"(h), "+r"(pixels), "+r" (block)
630  :"r" ((x86_reg)line_size), "r"((x86_reg)3*line_size)
631  :"memory"
632  );
633 }
634 
635 static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
636 {
637  __asm__ volatile(
638  "lea (%3, %3), %%"REG_a" \n\t"
639  "1: \n\t"
640  "movq (%2), %%mm0 \n\t"
641  "movq (%2, %3), %%mm1 \n\t"
642  PAVGB" (%1), %%mm0 \n\t"
643  PAVGB" (%1, %3), %%mm1 \n\t"
644  "movq %%mm0, (%2) \n\t"
645  "movq %%mm1, (%2, %3) \n\t"
646  "add %%"REG_a", %1 \n\t"
647  "add %%"REG_a", %2 \n\t"
648  "movq (%2), %%mm0 \n\t"
649  "movq (%2, %3), %%mm1 \n\t"
650  PAVGB" (%1), %%mm0 \n\t"
651  PAVGB" (%1, %3), %%mm1 \n\t"
652  "add %%"REG_a", %1 \n\t"
653  "movq %%mm0, (%2) \n\t"
654  "movq %%mm1, (%2, %3) \n\t"
655  "add %%"REG_a", %2 \n\t"
656  "subl $4, %0 \n\t"
657  "jnz 1b \n\t"
658  :"+g"(h), "+S"(pixels), "+D"(block)
659  :"r" ((x86_reg)line_size)
660  :"%"REG_a, "memory");
661 }
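/* The avg_* variants blend the newly computed prediction with what is already
 * in the destination; per byte (illustration only)
 *
 *     block[j] = (block[j] + pixels[j] + 1) >> 1;
 *
 * which is what PAVGB with a memory source operand does eight bytes at a time.
 */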
662 
663 static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
664 {
665  __asm__ volatile(
666  "lea (%3, %3), %%"REG_a" \n\t"
667  "1: \n\t"
668  "movq (%1), %%mm0 \n\t"
669  "movq (%1, %3), %%mm2 \n\t"
670  PAVGB" 1(%1), %%mm0 \n\t"
671  PAVGB" 1(%1, %3), %%mm2 \n\t"
672  PAVGB" (%2), %%mm0 \n\t"
673  PAVGB" (%2, %3), %%mm2 \n\t"
674  "add %%"REG_a", %1 \n\t"
675  "movq %%mm0, (%2) \n\t"
676  "movq %%mm2, (%2, %3) \n\t"
677  "movq (%1), %%mm0 \n\t"
678  "movq (%1, %3), %%mm2 \n\t"
679  PAVGB" 1(%1), %%mm0 \n\t"
680  PAVGB" 1(%1, %3), %%mm2 \n\t"
681  "add %%"REG_a", %2 \n\t"
682  "add %%"REG_a", %1 \n\t"
683  PAVGB" (%2), %%mm0 \n\t"
684  PAVGB" (%2, %3), %%mm2 \n\t"
685  "movq %%mm0, (%2) \n\t"
686  "movq %%mm2, (%2, %3) \n\t"
687  "add %%"REG_a", %2 \n\t"
688  "subl $4, %0 \n\t"
689  "jnz 1b \n\t"
690  :"+g"(h), "+S"(pixels), "+D"(block)
691  :"r" ((x86_reg)line_size)
692  :"%"REG_a, "memory");
693 }
694 
695 static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
696 {
697  __asm__ volatile(
698  "lea (%3, %3), %%"REG_a" \n\t"
699  "movq (%1), %%mm0 \n\t"
700  "sub %3, %2 \n\t"
701  "1: \n\t"
702  "movq (%1, %3), %%mm1 \n\t"
703  "movq (%1, %%"REG_a"), %%mm2 \n\t"
704  "add %%"REG_a", %1 \n\t"
705  PAVGB" %%mm1, %%mm0 \n\t"
706  PAVGB" %%mm2, %%mm1 \n\t"
707  "movq (%2, %3), %%mm3 \n\t"
708  "movq (%2, %%"REG_a"), %%mm4 \n\t"
709  PAVGB" %%mm3, %%mm0 \n\t"
710  PAVGB" %%mm4, %%mm1 \n\t"
711  "movq %%mm0, (%2, %3) \n\t"
712  "movq %%mm1, (%2, %%"REG_a") \n\t"
713  "movq (%1, %3), %%mm1 \n\t"
714  "movq (%1, %%"REG_a"), %%mm0 \n\t"
715  PAVGB" %%mm1, %%mm2 \n\t"
716  PAVGB" %%mm0, %%mm1 \n\t"
717  "add %%"REG_a", %2 \n\t"
718  "add %%"REG_a", %1 \n\t"
719  "movq (%2, %3), %%mm3 \n\t"
720  "movq (%2, %%"REG_a"), %%mm4 \n\t"
721  PAVGB" %%mm3, %%mm2 \n\t"
722  PAVGB" %%mm4, %%mm1 \n\t"
723  "movq %%mm2, (%2, %3) \n\t"
724  "movq %%mm1, (%2, %%"REG_a") \n\t"
725  "add %%"REG_a", %2 \n\t"
726  "subl $4, %0 \n\t"
727  "jnz 1b \n\t"
728  :"+g"(h), "+S"(pixels), "+D"(block)
729  :"r" ((x86_reg)line_size)
730  :"%"REG_a, "memory");
731 }
732 
733 /* Note this is not correctly rounded, but this function is only
734  * used for B-frames, so it does not matter. */
735 static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
736 {
737  MOVQ_BONE(mm6);
738  __asm__ volatile(
739  "lea (%3, %3), %%"REG_a" \n\t"
740  "movq (%1), %%mm0 \n\t"
741  PAVGB" 1(%1), %%mm0 \n\t"
742  ".p2align 3 \n\t"
743  "1: \n\t"
744  "movq (%1, %%"REG_a"), %%mm2 \n\t"
745  "movq (%1, %3), %%mm1 \n\t"
746  "psubusb %%mm6, %%mm2 \n\t"
747  PAVGB" 1(%1, %3), %%mm1 \n\t"
748  PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t"
749  "add %%"REG_a", %1 \n\t"
750  PAVGB" %%mm1, %%mm0 \n\t"
751  PAVGB" %%mm2, %%mm1 \n\t"
752  PAVGB" (%2), %%mm0 \n\t"
753  PAVGB" (%2, %3), %%mm1 \n\t"
754  "movq %%mm0, (%2) \n\t"
755  "movq %%mm1, (%2, %3) \n\t"
756  "movq (%1, %3), %%mm1 \n\t"
757  "movq (%1, %%"REG_a"), %%mm0 \n\t"
758  PAVGB" 1(%1, %3), %%mm1 \n\t"
759  PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t"
760  "add %%"REG_a", %2 \n\t"
761  "add %%"REG_a", %1 \n\t"
762  PAVGB" %%mm1, %%mm2 \n\t"
763  PAVGB" %%mm0, %%mm1 \n\t"
764  PAVGB" (%2), %%mm2 \n\t"
765  PAVGB" (%2, %3), %%mm1 \n\t"
766  "movq %%mm2, (%2) \n\t"
767  "movq %%mm1, (%2, %3) \n\t"
768  "add %%"REG_a", %2 \n\t"
769  "subl $4, %0 \n\t"
770  "jnz 1b \n\t"
771  :"+g"(h), "+S"(pixels), "+D"(block)
772  :"r" ((x86_reg)line_size)
773  :"%"REG_a, "memory");
774 }
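/* The exact half-pel (x+1/2, y+1/2) interpolation would be
 *
 *     (p[x][y] + p[x+1][y] + p[x][y+1] + p[x+1][y+1] + 2) >> 2
 *
 * Here it is approximated by cascaded pairwise PAVGB averages (horizontal
 * average of each row, vertical average of those, then a final average with
 * the destination), which can differ from the exact value by one; per the
 * note above that is acceptable because this function is only used for
 * B-frame averaging.
 */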
775 
776 //FIXME the following could be optimized too ...
777 static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
778  DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h);
779  DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h);
780 }
781 static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
782  DEF(put_pixels8_y2)(block , pixels , line_size, h);
783  DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h);
784 }
785 static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
786  DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h);
787  DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h);
788 }
789 static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
790  DEF(avg_pixels8)(block , pixels , line_size, h);
791  DEF(avg_pixels8)(block+8, pixels+8, line_size, h);
792 }
793 static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
794  DEF(avg_pixels8_x2)(block , pixels , line_size, h);
795  DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h);
796 }
797 static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
798  DEF(avg_pixels8_y2)(block , pixels , line_size, h);
799  DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h);
800 }
801 static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){
802  DEF(avg_pixels8_xy2)(block , pixels , line_size, h);
803  DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h);
804 }
805 
806 #define QPEL_2TAP_L3(OPNAME) \
807 static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
808  __asm__ volatile(\
809  "1: \n\t"\
810  "movq (%1,%2), %%mm0 \n\t"\
811  "movq 8(%1,%2), %%mm1 \n\t"\
812  PAVGB" (%1,%3), %%mm0 \n\t"\
813  PAVGB" 8(%1,%3), %%mm1 \n\t"\
814  PAVGB" (%1), %%mm0 \n\t"\
815  PAVGB" 8(%1), %%mm1 \n\t"\
816  STORE_OP( (%1,%4),%%mm0)\
817  STORE_OP(8(%1,%4),%%mm1)\
818  "movq %%mm0, (%1,%4) \n\t"\
819  "movq %%mm1, 8(%1,%4) \n\t"\
820  "add %5, %1 \n\t"\
821  "decl %0 \n\t"\
822  "jnz 1b \n\t"\
823  :"+g"(h), "+r"(src)\
824  :"r"((x86_reg)off1), "r"((x86_reg)off2),\
825  "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\
826  :"memory"\
827  );\
828 }\
829 static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
830  __asm__ volatile(\
831  "1: \n\t"\
832  "movq (%1,%2), %%mm0 \n\t"\
833  PAVGB" (%1,%3), %%mm0 \n\t"\
834  PAVGB" (%1), %%mm0 \n\t"\
835  STORE_OP((%1,%4),%%mm0)\
836  "movq %%mm0, (%1,%4) \n\t"\
837  "add %5, %1 \n\t"\
838  "decl %0 \n\t"\
839  "jnz 1b \n\t"\
840  :"+g"(h), "+r"(src)\
841  :"r"((x86_reg)off1), "r"((x86_reg)off2),\
842  "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\
843  :"memory"\
844  );\
845 }
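/* The generated *_2tap_qpel*_l3 functions combine three source lines per
 * output row: the two offset sources are averaged first and that result is
 * averaged with src itself, giving src a weight of roughly 1/2 and the two
 * offsets 1/4 each (with PAVGB rounding at every step). STORE_OP expands to
 * nothing for the put_ version and to one extra PAVGB with the destination
 * for the avg_ version, as defined below. Hypothetical per-byte sketch:
 *
 *     uint8_t t = (src[off1] + src[off2] + 1) >> 1;
 *     uint8_t v = (t + src[0] + 1) >> 1;
 *     dst[0] = is_avg ? (uint8_t)((dst[0] + v + 1) >> 1) : v;
 */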
846 
847 #ifndef SKIP_FOR_3DNOW
848 #define STORE_OP(a,b) PAVGB" "#a","#b" \n\t"
849 QPEL_2TAP_L3(avg_)
850 #undef STORE_OP
851 #define STORE_OP(a,b)
852 QPEL_2TAP_L3(put_)
853 #undef STORE_OP
854 #undef QPEL_2TAP_L3
855 #endif /* SKIP_FOR_3DNOW */