FFmpeg
h264chroma_mmi.c
/*
 * Loongson SIMD optimized h264chroma
 *
 * Copyright (c) 2015 Loongson Technology Corporation Limited
 * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *                    Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "h264chroma_mips.h"
#include "constants.h"
#include "libavutil/mips/mmiutils.h"

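/*
 * All four kernels below implement the H.264 chroma fractional-position
 * interpolation
 *
 *     dst[j] = (A*src[j] + B*src[j+1] + C*src[j+stride] + D*src[j+stride+1]
 *               + 32) >> 6
 *
 * with A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y and D = x*y, so the four tap
 * weights always sum to 64.  The disabled function below is only a scalar
 * sketch of that formula for reference (the name is illustrative, it is not
 * part of FFmpeg); the MMI assembly that follows is the real implementation.
 */
#if 0
static void put_h264_chroma_scalar_sketch(uint8_t *dst, const uint8_t *src,
                                          ptrdiff_t stride, int w, int h,
                                          int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}
#endif
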
void ff_put_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                int h, int x, int y)
{
    double ftmp[12];
    union mmi_intfloat64 A, B, C, D, E;
    A.i = 64;

    if (!(x || y)) {
        /* x=0, y=0, A.i=64 */
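        /* With both fractional offsets zero the filter degenerates to a
         * straight 8-byte row copy; the loop below copies four rows per
         * iteration, so it assumes h is a multiple of 4. */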
        __asm__ volatile (
            "1: \n\t"
            MMI_ULDC1(%[ftmp0], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp2], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp3], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"

            "addi %[h], %[h], -0x04 \n\t"

            MMI_SDC1(%[ftmp0], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp2], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp3], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride)
            : "memory"
        );
    } else if (x && y) {
        /* x!=0, y!=0 */
        D.i = x * y;
        B.i = (x << 3) - D.i;
        C.i = (y << 3) - D.i;
        A.i = 64 - D.i - B.i - C.i;

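        /* Full 2-D bilinear case: A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y
         * and D = x*y sum to 64, and each output byte is
         * (A*s[0] + B*s[1] + C*s[stride] + D*s[stride+1] + 32) >> 6;
         * the shift count 6 is loaded into ftmp9 via tmp0 below. */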
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[B], %[B], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp9] \n\t"
            "pshufh %[C], %[C], %[ftmp0] \n\t"
            "pshufh %[D], %[D], %[ftmp0] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            MMI_ULDC1(%[ftmp2], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp3], %[src], 0x00)
            MMI_ULDC1(%[ftmp4], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp10], %[src], 0x00)
            MMI_ULDC1(%[ftmp11], %[src], 0x01)
            "addi %[h], %[h], -0x02 \n\t"

            "punpcklbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[A] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[B] \n\t"
            "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[A] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[B] \n\t"
            "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"
            "punpcklbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp3], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[C] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[D] \n\t"
            "paddh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[C] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[D] \n\t"
            "paddh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ftmp6] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
            "psrlh %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"

            "punpcklbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp3], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[A] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[B] \n\t"
            "paddh %[ftmp3], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[A] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[B] \n\t"
            "paddh %[ftmp4], %[ftmp6], %[ftmp8] \n\t"
            "punpcklbh %[ftmp5], %[ftmp10], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp10], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp11], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp11], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[C] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[D] \n\t"
            "paddh %[ftmp5], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[C] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[D] \n\t"
            "paddh %[ftmp6], %[ftmp6], %[ftmp8] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ftmp6] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ff_pw_32] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ff_pw_32] \n\t"
            "psrlh %[ftmp3], %[ftmp3], %[ftmp9] \n\t"
            "psrlh %[ftmp4], %[ftmp4], %[ftmp9] \n\t"
            "packushb %[ftmp3], %[ftmp3], %[ftmp4] \n\t"

            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp3], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
              [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [ff_pw_32]"f"(ff_pw_32.f),
              [A]"f"(A.f), [B]"f"(B.f),
              [C]"f"(C.f), [D]"f"(D.f),
              [tmp0]"r"(0x06)
            : "memory"
        );
    } else if (x) {
        /* x!=0, y==0 */
        E.i = x << 3;
        A.i = 64 - E.i;

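        /* Horizontal-only case (y == 0): two taps per output byte,
         * (A*s[0] + E*s[1] + 32) >> 6 with E = 8*x and A = 64 - E,
         * one output row per loop iteration. */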
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            MMI_ULDC1(%[ftmp2], %[src], 0x01)
            "addi %[h], %[h], -0x01 \n\t"
            PTR_ADDU "%[src], %[src], %[stride] \n\t"

            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp5], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[E] \n\t"
            "paddh %[ftmp1], %[ftmp3], %[ftmp5] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[E] \n\t"
            "paddh %[ftmp2], %[ftmp4], %[ftmp6] \n\t"

            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp7] \n\t"
            "psrlh %[ftmp2], %[ftmp2], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride),
              [ff_pw_32]"f"(ff_pw_32.f), [tmp0]"r"(0x06),
              [A]"f"(A.f), [E]"f"(E.f)
            : "memory"
        );
    } else {
        /* x==0, y!=0 */
        E.i = y << 3;
        A.i = 64 - E.i;

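        /* Vertical-only case (x == 0): the two taps come from the same column
         * in adjacent rows, (A*s[0] + E*s[stride] + 32) >> 6; two output rows
         * are produced per iteration, reusing the middle row (ftmp2) as the
         * second tap of the first row and the first tap of the second row. */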
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp2], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp8], %[src], 0x00)
            "addi %[h], %[h], -0x02 \n\t"

            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp5], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[E] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[E] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ftmp6] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ff_pw_32] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ff_pw_32] \n\t"
            "psrlh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
            "psrlh %[ftmp4], %[ftmp4], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp3], %[ftmp4] \n\t"

            "punpcklbh %[ftmp3], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp4], %[ftmp2], %[ftmp0] \n\t"
            "punpcklbh %[ftmp5], %[ftmp8], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp8], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[E] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[E] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ftmp6] \n\t"
            "paddh %[ftmp3], %[ftmp3], %[ff_pw_32] \n\t"
            "paddh %[ftmp4], %[ftmp4], %[ff_pw_32] \n\t"
            "psrlh %[ftmp3], %[ftmp3], %[ftmp7] \n\t"
            "psrlh %[ftmp4], %[ftmp4], %[ftmp7] \n\t"
            "packushb %[ftmp2], %[ftmp3], %[ftmp4] \n\t"

            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp2], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [ftmp8]"=&f"(ftmp[8]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride),
              [ff_pw_32]"f"(ff_pw_32.f), [A]"f"(A.f),
              [E]"f"(E.f), [tmp0]"r"(0x06)
            : "memory"
        );
    }
}

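/* Same interpolation as ff_put_h264_chroma_mc8_mmi, except that the result is
 * merged into the existing destination with pavgb (per-byte rounding average),
 * which is what the "avg" motion-compensation entry points are used for. */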
void ff_avg_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                int h, int x, int y)
{
    double ftmp[10];
    union mmi_intfloat64 A, B, C, D, E;
    A.i = 64;

    if (!(x || y)) {
        /* x=0, y=0, A.i=64 */
        __asm__ volatile (
            "1: \n\t"
            MMI_ULDC1(%[ftmp0], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_LDC1(%[ftmp2], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_LDC1(%[ftmp3], %[dst], 0x00)
            PTR_SUBU "%[dst], %[dst], %[stride] \n\t"
            "pavgb %[ftmp0], %[ftmp0], %[ftmp2] \n\t"
            "pavgb %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
            MMI_SDC1(%[ftmp0], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "addi %[h], %[h], -0x02 \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride)
            : "memory"
        );
    } else if (x && y) {
        /* x!=0, y!=0 */
        D.i = x * y;
        B.i = (x << 3) - D.i;
        C.i = (y << 3) - D.i;
        A.i = 64 - D.i - B.i - C.i;
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[B], %[B], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp9] \n\t"
            "pshufh %[C], %[C], %[ftmp0] \n\t"
            "pshufh %[D], %[D], %[ftmp0] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            MMI_ULDC1(%[ftmp2], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp3], %[src], 0x00)
            MMI_ULDC1(%[ftmp4], %[src], 0x01)
            "addi %[h], %[h], -0x01 \n\t"

            "punpcklbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[A] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[B] \n\t"
            "paddh %[ftmp1], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[A] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[B] \n\t"
            "paddh %[ftmp2], %[ftmp6], %[ftmp8] \n\t"

            "punpcklbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp3], %[ftmp0] \n\t"
            "punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t"
            "punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[C] \n\t"
            "pmullh %[ftmp7], %[ftmp7], %[D] \n\t"
            "paddh %[ftmp3], %[ftmp5], %[ftmp7] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[C] \n\t"
            "pmullh %[ftmp8], %[ftmp8], %[D] \n\t"
            "paddh %[ftmp4], %[ftmp6], %[ftmp8] \n\t"

            "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ftmp4] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp9] \n\t"
            "psrlh %[ftmp2], %[ftmp2], %[ftmp9] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_LDC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [ff_pw_32]"f"(ff_pw_32.f),
              [A]"f"(A.f), [B]"f"(B.f),
              [C]"f"(C.f), [D]"f"(D.f),
              [tmp0]"r"(0x06)
            : "memory"
        );
    } else if (x) {
        /* x!=0, y==0 */
        E.i = x << 3;
        A.i = 64 - E.i;
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            MMI_ULDC1(%[ftmp2], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            "addi %[h], %[h], -0x01 \n\t"

            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp5], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[E] \n\t"
            "paddh %[ftmp1], %[ftmp3], %[ftmp5] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[E] \n\t"
            "paddh %[ftmp2], %[ftmp4], %[ftmp6] \n\t"

            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp7] \n\t"
            "psrlh %[ftmp2], %[ftmp2], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_LDC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride),
              [ff_pw_32]"f"(ff_pw_32.f), [tmp0]"r"(0x06),
              [A]"f"(A.f), [E]"f"(E.f)
            : "memory"
        );
    } else {
        /* x==0, y!=0 */
        E.i = y << 3;
        A.i = 64 - E.i;
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"

            "1: \n\t"
            MMI_ULDC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULDC1(%[ftmp2], %[src], 0x00)
            "addi %[h], %[h], -0x01 \n\t"

            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpckhbh %[ftmp4], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp5], %[ftmp2], %[ftmp0] \n\t"
            "punpckhbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[E] \n\t"
            "paddh %[ftmp1], %[ftmp3], %[ftmp5] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[E] \n\t"
            "paddh %[ftmp2], %[ftmp4], %[ftmp6] \n\t"

            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "paddh %[ftmp2], %[ftmp2], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp7] \n\t"
            "psrlh %[ftmp2], %[ftmp2], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_LDC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_SDC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride),
              [ff_pw_32]"f"(ff_pw_32.f), [tmp0]"r"(0x06),
              [A]"f"(A.f), [E]"f"(E.f)
            : "memory"
        );
    }
}

void ff_put_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                int h, int x, int y)
{
    double ftmp[8];
    mips_reg addr[1];
    union mmi_intfloat64 A, B, C, D, E;
    DECLARE_VAR_LOW32;
    A.i = (8 - x) * (8 - y);
    B.i = x * (8 - y);
    C.i = (8 - x) * y;
    D.i = x * y;
    E.i = B.i + C.i;

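    /* D = x*y is nonzero only when both offsets are fractional (full 2-D
     * filter). Otherwise at most one of B and C is nonzero, so E = B + C is
     * the single remaining tap weight and one two-tap loop can serve both the
     * horizontal-only and the vertical-only case below. */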
    if (D.i) {
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[B], %[B], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"
            "pshufh %[C], %[C], %[ftmp0] \n\t"
            "pshufh %[D], %[D], %[ftmp0] \n\t"

            "1: \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            MMI_ULWC1(%[ftmp2], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULWC1(%[ftmp3], %[src], 0x00)
            MMI_ULWC1(%[ftmp4], %[src], 0x01)

            "punpcklbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[B] \n\t"
            "paddh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
            "punpcklbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t"
            "punpcklbh %[ftmp6], %[ftmp4], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[C] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[D] \n\t"
            "paddh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"

            "addi %[h], %[h], -0x01 \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              RESTRICT_ASM_LOW32
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [ff_pw_32]"f"(ff_pw_32.f),
              [A]"f"(A.f), [B]"f"(B.f),
              [C]"f"(C.f), [D]"f"(D.f),
              [tmp0]"r"(0x06)
            : "memory"
        );
    } else if (E.i) {
        const int step = C.i ? stride : 1;
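        /* Only one of x and y is fractional here, so the second tap is either
         * the next pixel in the row (step = 1, when y == 0) or the pixel one
         * row below (step = stride, when x == 0); the same loop handles both. */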
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp5] \n\t"

            "1: \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[addr0], %[src], %[step] \n\t"
            MMI_ULWC1(%[ftmp2], %[addr0], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            "addi %[h], %[h], -0x01 \n\t"
            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp4], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[E] \n\t"
            "paddh %[ftmp1], %[ftmp3], %[ftmp4] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              RESTRICT_ASM_LOW32
              [addr0]"=&r"(addr[0]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [step]"r"((mips_reg)step),
              [ff_pw_32]"f"(ff_pw_32.f), [tmp0]"r"(0x06),
              [A]"f"(A.f), [E]"f"(E.f)
            : "memory"
        );
    } else {
        __asm__ volatile (
            "1: \n\t"
            MMI_ULWC1(%[ftmp0], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            "addi %[h], %[h], -0x02 \n\t"
            MMI_SWC1(%[ftmp0], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              RESTRICT_ASM_LOW32
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride)
            : "memory"
        );
    }
}

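/* 4-pixel-wide "avg" variant: same interpolation as ff_put_h264_chroma_mc4_mmi,
 * with the result averaged into the existing destination via pavgb. */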
void ff_avg_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                int h, int x, int y)
{
    double ftmp[8];
    mips_reg addr[1];
    union mmi_intfloat64 A, B, C, D, E;
    DECLARE_VAR_LOW32;
    A.i = (8 - x) * (8 - y);
    B.i = x * (8 - y);
    C.i = (8 - x) * y;
    D.i = x * y;
    E.i = B.i + C.i;

    if (D.i) {
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[B], %[B], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp7] \n\t"
            "pshufh %[C], %[C], %[ftmp0] \n\t"
            "pshufh %[D], %[D], %[ftmp0] \n\t"

            "1: \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            MMI_ULWC1(%[ftmp2], %[src], 0x01)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULWC1(%[ftmp3], %[src], 0x00)
            MMI_ULWC1(%[ftmp4], %[src], 0x01)

            "punpcklbh %[ftmp5], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp6], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[A] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[B] \n\t"
            "paddh %[ftmp1], %[ftmp5], %[ftmp6] \n\t"
            "punpcklbh %[ftmp5], %[ftmp3], %[ftmp0] \n\t"
            "punpcklbh %[ftmp6], %[ftmp4], %[ftmp0] \n\t"
            "pmullh %[ftmp5], %[ftmp5], %[C] \n\t"
            "pmullh %[ftmp6], %[ftmp6], %[D] \n\t"
            "paddh %[ftmp2], %[ftmp5], %[ftmp6] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp7] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
            MMI_LWC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"

            "addi %[h], %[h], -0x01 \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]),
              RESTRICT_ASM_LOW32
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [ff_pw_32]"f"(ff_pw_32.f),
              [A]"f"(A.f), [B]"f"(B.f),
              [C]"f"(C.f), [D]"f"(D.f),
              [tmp0]"r"(0x06)
            : "memory"
        );
    } else if (E.i) {
        const int step = C.i ? stride : 1;
        __asm__ volatile (
            "pxor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
            "pshufh %[A], %[A], %[ftmp0] \n\t"
            "pshufh %[E], %[E], %[ftmp0] \n\t"
            "mtc1 %[tmp0], %[ftmp5] \n\t"

            "1: \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[addr0], %[src], %[step] \n\t"
            MMI_ULWC1(%[ftmp2], %[addr0], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            "addi %[h], %[h], -0x01 \n\t"
            "punpcklbh %[ftmp3], %[ftmp1], %[ftmp0] \n\t"
            "punpcklbh %[ftmp4], %[ftmp2], %[ftmp0] \n\t"
            "pmullh %[ftmp3], %[ftmp3], %[A] \n\t"
            "pmullh %[ftmp4], %[ftmp4], %[E] \n\t"
            "paddh %[ftmp1], %[ftmp3], %[ftmp4] \n\t"
            "paddh %[ftmp1], %[ftmp1], %[ff_pw_32] \n\t"
            "psrlh %[ftmp1], %[ftmp1], %[ftmp5] \n\t"
            "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
            MMI_LWC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]),
              RESTRICT_ASM_LOW32
              [addr0]"=&r"(addr[0]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride), [step]"r"((mips_reg)step),
              [ff_pw_32]"f"(ff_pw_32.f), [tmp0]"r"(0x06),
              [A]"f"(A.f), [E]"f"(E.f)
            : "memory"
        );
    } else {
        __asm__ volatile (
            "1: \n\t"
            MMI_ULWC1(%[ftmp0], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            MMI_ULWC1(%[ftmp1], %[src], 0x00)
            PTR_ADDU "%[src], %[src], %[stride] \n\t"
            "addi %[h], %[h], -0x02 \n\t"
            MMI_LWC1(%[ftmp2], %[dst], 0x00)
            "pavgb %[ftmp0], %[ftmp0], %[ftmp2] \n\t"
            MMI_SWC1(%[ftmp0], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            MMI_LWC1(%[ftmp3], %[dst], 0x00)
            "pavgb %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
            MMI_SWC1(%[ftmp1], %[dst], 0x00)
            PTR_ADDU "%[dst], %[dst], %[stride] \n\t"
            "bnez %[h], 1b \n\t"
            : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
              [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
              [dst]"+&r"(dst), [src]"+&r"(src),
              RESTRICT_ASM_LOW32
              [h]"+&r"(h)
            : [stride]"r"((mips_reg)stride)
            : "memory"
        );
    }
}