dsputilenc_mmx.c
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"


#if HAVE_INLINE_ASM

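/* get_pixels: read an 8x8 block of unsigned bytes and widen it to the
 * 16-bit DCTELEM layout that the forward DCT expects. A rough scalar
 * equivalent, for illustration only (the name is ours, not part of the
 * API here):
 *
 *     static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels,
 *                                int line_size)
 *     {
 *         int x, y;
 *         for (y = 0; y < 8; y++) {
 *             for (x = 0; x < 8; x++)
 *                 block[8 * y + x] = pixels[x];
 *             pixels += line_size;
 *         }
 *     }
 *
 * The MMX version below unpacks two rows per iteration against a zero
 * register (mm7) and writes through a negative index that counts up to
 * zero, so the loop needs no separate compare. */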
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
        : "%"REG_a
    );
}

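/* SSE2 variant: a whole 8-pixel row fits in one XMM register after
 * unpacking, so the 8x8 block is handled as two unrolled groups of four
 * rows with no loop at all. */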
static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "pxor %%xmm4, %%xmm4 \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "lea (%0,%2,4), %0 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, (%1) \n\t"
        "movdqa %%xmm1, 16(%1) \n\t"
        "movdqa %%xmm2, 32(%1) \n\t"
        "movdqa %%xmm3, 48(%1) \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, 64(%1) \n\t"
        "movdqa %%xmm1, 80(%1) \n\t"
        "movdqa %%xmm2, 96(%1) \n\t"
        "movdqa %%xmm3, 112(%1) \n\t"
        : "+r" (pixels)
        : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)
    );
}

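/* diff_pixels: block[i] = s1[i] - s2[i] over an 8x8 grid, widened to
 * 16 bits, producing the residual that is fed to the DCT for
 * inter-coded macroblocks. Both sources advance by stride after each
 * row. */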
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
        : "%"REG_a
    );
}

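/* pix_sum16: sum of all pixels of a 16x16 block. The accumulator mm6
 * holds four 16-bit lanes that are folded together at the end; masking
 * with 0xFFFF is safe because the largest possible sum,
 * 16 * 16 * 255 = 65280, still fits in 16 bits. */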
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    x86_reg index= -line_size*h;

    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
    );

    return sum;
}

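/* pix_norm1: sum of squared pixel values of a 16x16 block, used by the
 * encoder as an energy measure. pmaddwd squares two 16-bit words and
 * adds the products into one 32-bit lane, handling two pixels per
 * instruction. */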
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    __asm__ volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"

        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"

        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) : "%ecx" );
    return tmp;
}

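/* sse8/sse16: sum of squared errors between two blocks. MMX has no
 * byte-wise absolute difference, so the code below uses the classic
 * unsigned-saturation trick:
 *
 *     |a - b| == sat_sub_u8(a, b) | sat_sub_u8(b, a)
 *
 * since whichever subtraction would go negative saturates to zero. In
 * scalar terms (illustrative only):
 *
 *     d  = a > b ? a - b : 0;
 *     d |= b > a ? b - a : 0;    // d == |a - b|
 *
 * The absolute differences are then widened to words and squared with
 * pmaddwd, which also performs the first level of summation. */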
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm2 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm2 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "add %3,%0\n"
        "add %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

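/* hf_noise8/hf_noise16: estimate the high-frequency content of a block.
 * Each row's horizontal first-order difference (pixel minus its right
 * neighbour) is computed, and the absolute second-order difference of
 * those values across consecutive rows is accumulated. The 8-pixel
 * version extracts the shifted row with psllq/psrlq; the 16-pixel
 * version loads from an offset of 1 instead and reuses the 8-pixel
 * routine for its right half. */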
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}

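/* nsse: noise-preserving sum of squared errors,
 * score = sse + nsse_weight * |hf_noise(pix1) - hf_noise(pix2)|.
 * Penalizing a change in high-frequency energy keeps the encoder from
 * preferring blocks that smooth away texture; the weight comes from
 * AVCodecContext.nsse_weight, with 8 used when no context is given. */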
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

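/* vsad_intra16: "vertical SAD" of one block, the sum over all rows of
 * |pix[x][y] - pix[x][y-1]|, 16 pixels wide. The plain MMX version uses
 * the saturation trick described above for the absolute differences;
 * the MMXEXT version gets them (and the horizontal sum) for free via
 * psadbw. vsad16 further below applies the same measure to the
 * difference between two blocks, with an 0x80 bias making the signed
 * byte arithmetic work on unsigned pixels. */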
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    av_assert2(((uintptr_t)pix & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"


    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmxext(void *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    av_assert2(((uintptr_t)pix & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    av_assert2(((uintptr_t)pix1 & 7) == 0);
    av_assert2(((uintptr_t)pix2 & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"


    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    av_assert2(((uintptr_t)pix1 & 7) == 0);
    av_assert2(((uintptr_t)pix2 & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

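/* diff_bytes: dst[i] = src1[i] - src2[i] for w bytes. The MMX loop
 * handles 16 bytes per iteration while at least 16 bytes remain; the
 * scalar tail loop finishes the rest, so w need not be a multiple of
 * 16. */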
static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w){
    x86_reg i=0;
    if(w>=16)
        __asm__ volatile(
            "1: \n\t"
            "movq (%2, %0), %%mm0 \n\t"
            "movq (%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, (%3, %0) \n\t"
            "movq 8(%2, %0), %%mm0 \n\t"
            "movq 8(%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, 8(%3, %0) \n\t"
            "add $16, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (i)
            : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
        );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

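/* sub_hfyu_median_prediction: HuffYUV median prediction. For each byte
 * the predictor is mid_pred(L, T, L + T - LT), the median of the left
 * neighbour, the top neighbour and their gradient, and the residual
 * src2[i] - pred is stored in dst. The asm handles 8 bytes at a time;
 * dst[0] is fixed up in C because its left neighbour comes from *left,
 * and *left / *left_top carry the edge state over to the next call. */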
static void sub_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *src1,
                                              const uint8_t *src2, int w,
                                              int *left, int *left_top)
{
    x86_reg i=0;
    uint8_t l, lt;

    __asm__ volatile(
        "movq (%1, %0), %%mm0 \n\t"   // LT
        "psllq $8, %%mm0 \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm1 \n\t"   // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t"   // X
        "movq %%mm2, %%mm4 \n\t"      // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t"     // L + T - LT
        "movq %%mm4, %%mm5 \n\t"      // L
        "pmaxub %%mm1, %%mm4 \n\t"    // max(T, L)
        "pminub %%mm5, %%mm1 \n\t"    // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t"     // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}

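/* MMABS_*: per-word absolute value with whatever the instruction set
 * offers (compare/xor/subtract on MMX, pmaxsw against the negation on
 * MMXEXT, native pabsw on SSSE3). MMABS_SUM accumulates |a| with
 * unsigned saturation, and the HSUM_* macros reduce the word lanes of
 * one register to a scalar. Together they build sum_abs_dctelem(), the
 * sum of absolute DCT coefficients used as a cheap cost estimate. */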
#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
    int sum;\
    __asm__ volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
#define MMABS(a,z) MMABS_MMXEXT(a,z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD

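/* ssd_int8_vs_int16: sum of squared differences between an int8_t and
 * an int16_t array, used by the QNS quantization search. The int8
 * values are sign-extended by duplicating each byte into both halves of
 * a word (punpck*bw with itself) and shifting arithmetically right by
 * 8; size must be a multiple of 8. */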
static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    x86_reg i=size;
    __asm__ volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        :"+r"(i), "=r"(sum)
        :"r"(pix1), "r"(pix2)
    );
    return sum;
}

#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"
/*
   pmulhw:   dst[0-15]=(src[0-15]*dst[0-15])[16-31]
   pmulhrw:  dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
   pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
 */
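/* Plain MMX has no rounded high-word multiply, so the PMULHRW macro
 * below approximates pmulhrw with pmulhw followed by add-one and a
 * 1-bit shift; the lost scale bit is compensated by SCALE_OFFSET. The
 * 3DNow! and SSSE3 template instantiations further down use the native
 * pmulhrw/pmulhrsw instructions instead. */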
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu (void *s, uint8_t *src1, uint8_t *src2, \
                             int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

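/* Entry point: select the fastest available implementation for each
 * function pointer according to the runtime CPU flags, from plain MMX
 * up through MMXEXT, SSE2, SSSE3 and 3DNow!. */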
void ff_dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
    int bit_depth = avctx->bits_per_raw_sample;

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX)) {
            if(mm_flags & AV_CPU_FLAG_SSE2){
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmxext;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }

        if (bit_depth <= 8)
            c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;

        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
            c->vsad[4] = vsad_intra16_mmxext;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmxext;
            }

            c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;
        }

        if(mm_flags & AV_CPU_FLAG_SSE2){
            if (bit_depth <= 8)
                c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;
        }

#if HAVE_SSSE3_INLINE
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            }
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
        }
#endif

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            }
            c->add_8x8basis= add_8x8basis_3dnow;
        }
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

        if (EXTERNAL_MMXEXT(mm_flags)) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
            c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        }

        if (EXTERNAL_SSE2(mm_flags)) {
            c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
            c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
            c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
        }

        if (EXTERNAL_SSSE3(mm_flags) && HAVE_ALIGNED_STACK) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
            c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
        }
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}
1219 }