FFmpeg
me_cmp.c
Go to the documentation of this file.
1 /*
2  * DSP utils
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "libavutil/attributes.h"
24 #include "libavutil/internal.h"
25 #include "libavutil/mem_internal.h"
26 #include "avcodec.h"
27 #include "copy_block.h"
28 #include "simple_idct.h"
29 #include "me_cmp.h"
30 #include "mpegvideoenc.h"
31 #include "config.h"
32 #include "config_components.h"
33 
/* (i - 256) * (i - 256) */
/* Squares of x for x in [-256, 255], indexed as ff_square_tab[x + 256].
 * The SSE compare functions below address it through a pointer to the
 * centre element so a signed pixel difference can be used as the index. */
const uint32_t ff_square_tab[512] = {
    65536, 65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504, 61009, 60516, 60025, 59536, 59049, 58564, 58081,
    57600, 57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824, 53361, 52900, 52441, 51984, 51529, 51076, 50625,
    50176, 49729, 49284, 48841, 48400, 47961, 47524, 47089, 46656, 46225, 45796, 45369, 44944, 44521, 44100, 43681,
    43264, 42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000, 39601, 39204, 38809, 38416, 38025, 37636, 37249,
    36864, 36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856, 33489, 33124, 32761, 32400, 32041, 31684, 31329,
    30976, 30625, 30276, 29929, 29584, 29241, 28900, 28561, 28224, 27889, 27556, 27225, 26896, 26569, 26244, 25921,
    25600, 25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104, 22801, 22500, 22201, 21904, 21609, 21316, 21025,
    20736, 20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496, 18225, 17956, 17689, 17424, 17161, 16900, 16641,
    16384, 16129, 15876, 15625, 15376, 15129, 14884, 14641, 14400, 14161, 13924, 13689, 13456, 13225, 12996, 12769,
    12544, 12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816, 10609, 10404, 10201, 10000, 9801, 9604, 9409,
    9216, 9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744, 7569, 7396, 7225, 7056, 6889, 6724, 6561,
    6400, 6241, 6084, 5929, 5776, 5625, 5476, 5329, 5184, 5041, 4900, 4761, 4624, 4489, 4356, 4225,
    4096, 3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136, 3025, 2916, 2809, 2704, 2601, 2500, 2401,
    2304, 2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600, 1521, 1444, 1369, 1296, 1225, 1156, 1089,
    1024, 961, 900, 841, 784, 729, 676, 625, 576, 529, 484, 441, 400, 361, 324, 289,
    256, 225, 196, 169, 144, 121, 100, 81, 64, 49, 36, 25, 16, 9, 4, 1,
    0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225,
    256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961,
    1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209,
    2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, 3249, 3364, 3481, 3600, 3721, 3844, 3969,
    4096, 4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241,
    6400, 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, 8649, 8836, 9025,
    9216, 9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816, 11025, 11236, 11449, 11664, 11881, 12100, 12321,
    12544, 12769, 12996, 13225, 13456, 13689, 13924, 14161, 14400, 14641, 14884, 15129, 15376, 15625, 15876, 16129,
    16384, 16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496, 18769, 19044, 19321, 19600, 19881, 20164, 20449,
    20736, 21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104, 23409, 23716, 24025, 24336, 24649, 24964, 25281,
    25600, 25921, 26244, 26569, 26896, 27225, 27556, 27889, 28224, 28561, 28900, 29241, 29584, 29929, 30276, 30625,
    30976, 31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856, 34225, 34596, 34969, 35344, 35721, 36100, 36481,
    36864, 37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000, 40401, 40804, 41209, 41616, 42025, 42436, 42849,
    43264, 43681, 44100, 44521, 44944, 45369, 45796, 46225, 46656, 47089, 47524, 47961, 48400, 48841, 49284, 49729,
    50176, 50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824, 54289, 54756, 55225, 55696, 56169, 56644, 57121,
    57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
};
69 
70 static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
71  ptrdiff_t stride, int h)
72 {
73  int s = 0, i;
74  const uint32_t *sq = ff_square_tab + 256;
75 
76  for (i = 0; i < h; i++) {
77  s += sq[pix1[0] - pix2[0]];
78  s += sq[pix1[1] - pix2[1]];
79  s += sq[pix1[2] - pix2[2]];
80  s += sq[pix1[3] - pix2[3]];
81  pix1 += stride;
82  pix2 += stride;
83  }
84  return s;
85 }
86 
87 static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
88  ptrdiff_t stride, int h)
89 {
90  int s = 0, i;
91  const uint32_t *sq = ff_square_tab + 256;
92 
93  for (i = 0; i < h; i++) {
94  s += sq[pix1[0] - pix2[0]];
95  s += sq[pix1[1] - pix2[1]];
96  s += sq[pix1[2] - pix2[2]];
97  s += sq[pix1[3] - pix2[3]];
98  s += sq[pix1[4] - pix2[4]];
99  s += sq[pix1[5] - pix2[5]];
100  s += sq[pix1[6] - pix2[6]];
101  s += sq[pix1[7] - pix2[7]];
102  pix1 += stride;
103  pix2 += stride;
104  }
105  return s;
106 }
107 
108 static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
109  ptrdiff_t stride, int h)
110 {
111  int s = 0, i;
112  const uint32_t *sq = ff_square_tab + 256;
113 
114  for (i = 0; i < h; i++) {
115  s += sq[pix1[0] - pix2[0]];
116  s += sq[pix1[1] - pix2[1]];
117  s += sq[pix1[2] - pix2[2]];
118  s += sq[pix1[3] - pix2[3]];
119  s += sq[pix1[4] - pix2[4]];
120  s += sq[pix1[5] - pix2[5]];
121  s += sq[pix1[6] - pix2[6]];
122  s += sq[pix1[7] - pix2[7]];
123  s += sq[pix1[8] - pix2[8]];
124  s += sq[pix1[9] - pix2[9]];
125  s += sq[pix1[10] - pix2[10]];
126  s += sq[pix1[11] - pix2[11]];
127  s += sq[pix1[12] - pix2[12]];
128  s += sq[pix1[13] - pix2[13]];
129  s += sq[pix1[14] - pix2[14]];
130  s += sq[pix1[15] - pix2[15]];
131 
132  pix1 += stride;
133  pix2 += stride;
134  }
135  return s;
136 }
137 
/**
 * Sum of absolute values of all 64 coefficients of an 8x8 DCT block.
 */
static int sum_abs_dctelem_c(int16_t *block)
{
    const int16_t *p   = block;
    const int16_t *end = block + 64;
    int total = 0;

    while (p < end) {
        int v = *p++;  // read once: FFABS evaluates its argument twice
        total += FFABS(v);
    }
    return total;
}
146 
/* Rounded averages of 2 and 4 samples, used by the half-pel SAD variants. */
#define avg2(a, b) (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
149 
150 static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
151  ptrdiff_t stride, int h)
152 {
153  int s = 0, i;
154 
155  for (i = 0; i < h; i++) {
156  s += abs(pix1[0] - pix2[0]);
157  s += abs(pix1[1] - pix2[1]);
158  s += abs(pix1[2] - pix2[2]);
159  s += abs(pix1[3] - pix2[3]);
160  s += abs(pix1[4] - pix2[4]);
161  s += abs(pix1[5] - pix2[5]);
162  s += abs(pix1[6] - pix2[6]);
163  s += abs(pix1[7] - pix2[7]);
164  s += abs(pix1[8] - pix2[8]);
165  s += abs(pix1[9] - pix2[9]);
166  s += abs(pix1[10] - pix2[10]);
167  s += abs(pix1[11] - pix2[11]);
168  s += abs(pix1[12] - pix2[12]);
169  s += abs(pix1[13] - pix2[13]);
170  s += abs(pix1[14] - pix2[14]);
171  s += abs(pix1[15] - pix2[15]);
172  pix1 += stride;
173  pix2 += stride;
174  }
175  return s;
176 }
177 
/**
 * Median-predicted SAD over a 16-pixel-wide block: instead of summing raw
 * differences, each per-pixel difference V is predicted from its left,
 * top and top-left neighbours (MPEG-4-style median predictor) and the
 * absolute prediction residual is accumulated.
 */
static inline int pix_median_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                                     ptrdiff_t stride, int h)
{
    int s = 0, i, j;

// V(x): block difference at offset x; negative offsets reach the row above.
#define V(x) (pix1[x] - pix2[x])

    /* First row: no row above, so each value is predicted from its left
     * neighbour only (the very first sample has no predictor at all). */
    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));
    s += abs(V(8) - V(7));
    s += abs(V(9) - V(8));
    s += abs(V(10) - V(9));
    s += abs(V(11) - V(10));
    s += abs(V(12) - V(11));
    s += abs(V(13) - V(12));
    s += abs(V(14) - V(13));
    s += abs(V(15) - V(14));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        // Column 0 has no left neighbour: predict from the value above.
        s += abs(V(0) - V(-stride));
        // Median of top, left, and (top + left - top-left) gradient predictor.
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;

    }
#undef V
    return s;
}
216 
217 static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
218  ptrdiff_t stride, int h)
219 {
220  int s = 0, i;
221 
222  for (i = 0; i < h; i++) {
223  s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
224  s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
225  s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
226  s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
227  s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
228  s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
229  s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
230  s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
231  s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
232  s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
233  s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
234  s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
235  s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
236  s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
237  s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
238  s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
239  pix1 += stride;
240  pix2 += stride;
241  }
242  return s;
243 }
244 
245 static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
246  ptrdiff_t stride, int h)
247 {
248  int s = 0, i;
249  uint8_t *pix3 = pix2 + stride;
250 
251  for (i = 0; i < h; i++) {
252  s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
253  s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
254  s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
255  s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
256  s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
257  s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
258  s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
259  s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
260  s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
261  s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
262  s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
263  s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
264  s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
265  s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
266  s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
267  s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
268  pix1 += stride;
269  pix2 += stride;
270  pix3 += stride;
271  }
272  return s;
273 }
274 
275 static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
276  ptrdiff_t stride, int h)
277 {
278  int s = 0, i;
279  uint8_t *pix3 = pix2 + stride;
280 
281  for (i = 0; i < h; i++) {
282  s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
283  s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
284  s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
285  s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
286  s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
287  s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
288  s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
289  s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
290  s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
291  s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
292  s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
293  s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
294  s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
295  s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
296  s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
297  s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
298  pix1 += stride;
299  pix2 += stride;
300  pix3 += stride;
301  }
302  return s;
303 }
304 
305 static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
306  ptrdiff_t stride, int h)
307 {
308  int s = 0, i;
309 
310  for (i = 0; i < h; i++) {
311  s += abs(pix1[0] - pix2[0]);
312  s += abs(pix1[1] - pix2[1]);
313  s += abs(pix1[2] - pix2[2]);
314  s += abs(pix1[3] - pix2[3]);
315  s += abs(pix1[4] - pix2[4]);
316  s += abs(pix1[5] - pix2[5]);
317  s += abs(pix1[6] - pix2[6]);
318  s += abs(pix1[7] - pix2[7]);
319  pix1 += stride;
320  pix2 += stride;
321  }
322  return s;
323 }
324 
/**
 * Median-predicted SAD over an 8-pixel-wide block; 8-wide counterpart of
 * pix_median_abs16_c (median prediction of per-pixel differences from
 * left/top/top-left neighbours, absolute residuals summed).
 */
static inline int pix_median_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                                    ptrdiff_t stride, int h)
{
    int s = 0, i, j;

// V(x): block difference at offset x; negative offsets reach the row above.
#define V(x) (pix1[x] - pix2[x])

    /* First row: left-neighbour prediction only. */
    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        // Column 0: predict from the value directly above.
        s += abs(V(0) - V(-stride));
        // Median of top, left, and top+left-topleft gradient predictor.
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;

    }
#undef V
    return s;
}
355 
356 static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
357  ptrdiff_t stride, int h)
358 {
359  int s = 0, i;
360 
361  for (i = 0; i < h; i++) {
362  s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
363  s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
364  s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
365  s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
366  s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
367  s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
368  s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
369  s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
370  pix1 += stride;
371  pix2 += stride;
372  }
373  return s;
374 }
375 
376 static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
377  ptrdiff_t stride, int h)
378 {
379  int s = 0, i;
380  uint8_t *pix3 = pix2 + stride;
381 
382  for (i = 0; i < h; i++) {
383  s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
384  s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
385  s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
386  s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
387  s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
388  s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
389  s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
390  s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
391  pix1 += stride;
392  pix2 += stride;
393  pix3 += stride;
394  }
395  return s;
396 }
397 
398 static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
399  ptrdiff_t stride, int h)
400 {
401  int s = 0, i;
402  uint8_t *pix3 = pix2 + stride;
403 
404  for (i = 0; i < h; i++) {
405  s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
406  s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
407  s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
408  s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
409  s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
410  s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
411  s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
412  s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
413  pix1 += stride;
414  pix2 += stride;
415  pix3 += stride;
416  }
417  return s;
418 }
419 
/**
 * Noise-preserving SSE over a 16-wide block: score1 is the plain SSE,
 * score2 accumulates the difference between the two blocks' second-order
 * (diagonal) gradients, penalizing loss of texture/noise structure.
 * The gradient term is weighted by avctx->nsse_weight (8 if no context).
 */
static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        // Gradient term needs the row below, so skip the last row.
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
444 
/**
 * Noise-preserving SSE over an 8-wide block; see nsse16_c for the
 * meaning of score1 (SSE) and score2 (diagonal gradient difference).
 */
static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        // Gradient term needs the row below, so skip the last row.
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
469 
/**
 * Dummy compare function for FF_CMP_ZERO: every candidate scores 0.
 */
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}
475 
477 {
478  int i;
479 
480  memset(cmp, 0, sizeof(void *) * 6);
481 
482  for (i = 0; i < 6; i++) {
483  switch (type & 0xFF) {
484  case FF_CMP_SAD:
485  cmp[i] = c->sad[i];
486  break;
487  case FF_CMP_MEDIAN_SAD:
488  cmp[i] = c->median_sad[i];
489  break;
490  case FF_CMP_SATD:
491  cmp[i] = c->hadamard8_diff[i];
492  break;
493  case FF_CMP_SSE:
494  cmp[i] = c->sse[i];
495  break;
496  case FF_CMP_DCT:
497  cmp[i] = c->dct_sad[i];
498  break;
499  case FF_CMP_DCT264:
500  cmp[i] = c->dct264_sad[i];
501  break;
502  case FF_CMP_DCTMAX:
503  cmp[i] = c->dct_max[i];
504  break;
505  case FF_CMP_PSNR:
506  cmp[i] = c->quant_psnr[i];
507  break;
508  case FF_CMP_BIT:
509  cmp[i] = c->bit[i];
510  break;
511  case FF_CMP_RD:
512  cmp[i] = c->rd[i];
513  break;
514  case FF_CMP_VSAD:
515  cmp[i] = c->vsad[i];
516  break;
517  case FF_CMP_VSSE:
518  cmp[i] = c->vsse[i];
519  break;
520  case FF_CMP_ZERO:
521  cmp[i] = zero_cmp;
522  break;
523  case FF_CMP_NSSE:
524  cmp[i] = c->nsse[i];
525  break;
526 #if CONFIG_DWT
527  case FF_CMP_W53:
528  cmp[i]= c->w53[i];
529  break;
530  case FF_CMP_W97:
531  cmp[i]= c->w97[i];
532  break;
533 #endif
534  default:
536  "internal error in cmp function selection\n");
537  }
538  }
539 }
540 
/* 2-point butterflies used by the Hadamard transforms below:
 * BUTTERFLY2 writes sum/difference of two inputs to two outputs,
 * BUTTERFLY1 does the same in place, and BUTTERFLYA folds the final
 * stage directly into |x + y| + |x - y|. */
#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2); \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y) \
    { \
        int a, b; \
        a = x; \
        b = y; \
        x = a + b; \
        y = a - b; \
    }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
555 
/**
 * SATD of an 8x8 block: Hadamard-transform the pixel difference src-dst
 * (3 butterfly stages per row, then per column) and return the sum of
 * absolute transform coefficients.
 */
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
                               uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    av_assert2(h == 8);

    /* Horizontal pass: butterfly network over each row of differences. */
    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0] - dst[stride * i + 0],
                   src[stride * i + 1] - dst[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2] - dst[stride * i + 2],
                   src[stride * i + 3] - dst[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4] - dst[stride * i + 4],
                   src[stride * i + 5] - dst[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6] - dst[stride * i + 6],
                   src[stride * i + 7] - dst[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    /* Vertical pass; the last stage is folded into the |a+b|+|a-b| sum. */
    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
               BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
               BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
               BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }
    return sum;
}
607 
/**
 * Intra SATD of an 8x8 block: Hadamard-transform the source pixels
 * themselves (no reference block) and sum the absolute coefficients,
 * then subtract the absolute DC term so the score ignores the mean.
 */
static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
                                uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    av_assert2(h == 8);

    /* Horizontal pass over the raw source rows. */
    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0], src[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2], src[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4], src[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6], src[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    /* Vertical pass; last stage folded into the absolute-value sum. */
    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum +=
            BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
            + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
            + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
            + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean

    return sum;
}
659 
/**
 * DCT-domain SAD: forward-DCT the 8x8 pixel difference and return the
 * sum of absolute transform coefficients (via the mecc hook so an
 * optimized sum_abs_dctelem can be used).
 */
static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
                        uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);

    av_assert2(h == 8);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);
    return s->mecc.sum_abs_dctelem(temp);
}
671 
672 #if CONFIG_GPL
/* One 8-point pass of the H.264 high-profile style 8x8 integer transform,
 * as used by the dct264 metric below. Input is read through the SRC(x)
 * macro and output written through DST(x, v), so the same body serves
 * both the row and the column pass. */
#define DCT8_1D \
    { \
        /* even part: sums of mirrored sample pairs */ \
        const int s07 = SRC(0) + SRC(7); \
        const int s16 = SRC(1) + SRC(6); \
        const int s25 = SRC(2) + SRC(5); \
        const int s34 = SRC(3) + SRC(4); \
        const int a0 = s07 + s34; \
        const int a1 = s16 + s25; \
        const int a2 = s07 - s34; \
        const int a3 = s16 - s25; \
        /* odd part: differences of mirrored sample pairs */ \
        const int d07 = SRC(0) - SRC(7); \
        const int d16 = SRC(1) - SRC(6); \
        const int d25 = SRC(2) - SRC(5); \
        const int d34 = SRC(3) - SRC(4); \
        const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1); \
        DST(1, a4 + (a7 >> 2)); \
        DST(2, a2 + (a3 >> 1)); \
        DST(3, a5 + (a6 >> 2)); \
        DST(4, a0 - a1); \
        DST(5, a6 - (a5 >> 2)); \
        DST(6, (a2 >> 1) - a3); \
        DST(7, (a4 >> 2) - a7); \
    }
700 
/**
 * SAD in the domain of the H.264-style 8x8 integer transform: transform
 * the pixel difference with DCT8_1D (rows, then columns) and accumulate
 * the absolute coefficient values through the redefined DST() hook.
 */
static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
                           uint8_t *src2, ptrdiff_t stride, int h)
{
    int16_t dct[8][8];
    int i, sum = 0;

    s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);

/* Row pass: transform each row in place. */
#define SRC(x) dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

/* Column pass: DST() discards the value and sums |v| instead. */
#define SRC(x) dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    return sum;
}
724 #endif
725 
726 static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
727  uint8_t *src2, ptrdiff_t stride, int h)
728 {
729  LOCAL_ALIGNED_16(int16_t, temp, [64]);
730  int sum = 0, i;
731 
732  av_assert2(h == 8);
733 
734  s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
735  s->fdsp.fdct(temp);
736 
737  for (i = 0; i < 64; i++)
738  sum = FFMAX(sum, FFABS(temp[i]));
739 
740  return sum;
741 }
742 
743 static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
744  uint8_t *src2, ptrdiff_t stride, int h)
745 {
746  LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
747  int16_t *const bak = temp + 64;
748  int sum = 0, i;
749 
750  av_assert2(h == 8);
751  s->mb_intra = 0;
752 
753  s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
754 
755  memcpy(bak, temp, 64 * sizeof(int16_t));
756 
757  s->block_last_index[0 /* FIXME */] =
758  s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
759  s->dct_unquantize_inter(s, temp, 0, s->qscale);
761 
762  for (i = 0; i < 64; i++)
763  sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
764 
765  return sum;
766 }
767 
/**
 * Rate-distortion score of an 8x8 block: quantize the pixel difference,
 * estimate the VLC bit cost of the resulting run/level pairs, dequantize
 * and reconstruct via the IDCT, then combine SSE distortion with a
 * qscale-weighted bit cost.
 */
static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    av_assert2(h == 8);

    // Work on aligned local copies so idct_add can reconstruct in place.
    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->block_last_index[0 /* FIXME */] =
    last =
        s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i = 1;  // DC coded separately for intra
        length = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        // Walk coefficients in scan order, counting zero runs between levels.
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;  // bias into the VLC length table's index range
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;  // outside table -> escape code
                run = 0;
            } else
                run++;
        }
        // The final coefficient uses the "last" VLC table.
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->mb_intra)
            s->dct_unquantize_intra(s, temp, 0, s->qscale);
        else
            s->dct_unquantize_inter(s, temp, 0, s->qscale);
    }

    s->idsp.idct_add(lsrc2, 8, temp);

    distortion = s->mecc.sse[1](NULL, lsrc2, lsrc1, 8, 8);

    // bits * qscale^2 * 109/128: fixed-point rate weight added to the SSE.
    return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
}
844 
/**
 * Bit-cost score of an 8x8 block: quantize the pixel difference and
 * estimate the VLC bit count of the run/level pairs (rate only, no
 * distortion term — compare rd8x8_c).
 */
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    av_assert2(h == 8);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->block_last_index[0 /* FIXME */] =
    last =
        s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i = 1;  // DC coded separately for intra
        length = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        // Walk coefficients in scan order, counting zero runs between levels.
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;  // bias into the VLC length table's index range
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;  // outside table -> escape code
                run = 0;
            } else
                run++;
        }
        // The final coefficient uses the "last" VLC table.
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}
905 
/* Intra vertical SAD: sum of |s[y][x] - s[y+1][x]| within a single block
 * (the second pixel operand is unused); measures vertical activity. */
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \
                                    uint8_t *s, uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
 \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += FFABS(s[x] - s[x + stride]) + \
                     FFABS(s[x + 1] - s[x + stride + 1]) + \
                     FFABS(s[x + 2] - s[x + 2 + stride]) + \
                     FFABS(s[x + 3] - s[x + 3 + stride]); \
        } \
        s += stride; \
    } \
 \
    return score; \
}
VSAD_INTRA(8)
VSAD_INTRA(16)
927 
/* Inter vertical SAD: absolute difference of the two blocks' vertical
 * gradients, i.e. how differently they change from row to row. */
#define VSAD(size) \
static int vsad ## size ## _c(MpegEncContext *c, \
                              uint8_t *s1, uint8_t *s2, \
                              ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
 \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
 \
    return score; \
}
VSAD(8)
VSAD(16)
946 
/* Squared-value helper for the VSSE metrics below. */
#define SQ(a) ((a) * (a))

/* Intra vertical SSE: squared differences between vertically adjacent
 * pixels within one block (second pixel operand unused). */
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \
                                    uint8_t *s, uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
 \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += SQ(s[x] - s[x + stride]) + \
                     SQ(s[x + 1] - s[x + stride + 1]) + \
                     SQ(s[x + 2] - s[x + stride + 2]) + \
                     SQ(s[x + 3] - s[x + stride + 3]); \
        } \
        s += stride; \
    } \
 \
    return score; \
}
VSSE_INTRA(8)
VSSE_INTRA(16)
969 
/* Inter vertical SSE: squared difference of the two blocks' vertical
 * gradients (SSE counterpart of VSAD). */
#define VSSE(size) \
static int vsse ## size ## _c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, \
                              ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
 \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
 \
    return score; \
}
VSSE(8)
VSSE(16)
987 
/* Build a 16-wide compare function from an 8x8 one by scoring the 8x8
 * quadrants: two side by side for h == 8, all four for h == 16. */
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
                  ptrdiff_t stride, int h) \
{ \
    int score = 0; \
 \
    score += name8(s, dst, src, stride, 8); \
    score += name8(s, dst + 8, src + 8, stride, 8); \
    if (h == 16) { \
        dst += 8 * stride; \
        src += 8 * stride; \
        score += name8(s, dst, src, stride, 8); \
        score += name8(s, dst + 8, src + 8, stride, 8); \
    } \
    return score; \
}

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)
1015 
1017 {
1018  c->sum_abs_dctelem = sum_abs_dctelem_c;
1019 
1020  /* TODO [0] 16 [1] 8 */
1021  c->pix_abs[0][0] = pix_abs16_c;
1022  c->pix_abs[0][1] = pix_abs16_x2_c;
1023  c->pix_abs[0][2] = pix_abs16_y2_c;
1024  c->pix_abs[0][3] = pix_abs16_xy2_c;
1025  c->pix_abs[1][0] = pix_abs8_c;
1026  c->pix_abs[1][1] = pix_abs8_x2_c;
1027  c->pix_abs[1][2] = pix_abs8_y2_c;
1028  c->pix_abs[1][3] = pix_abs8_xy2_c;
1029 
1030 #define SET_CMP_FUNC(name) \
1031  c->name[0] = name ## 16_c; \
1032  c->name[1] = name ## 8x8_c;
1033 
1034  SET_CMP_FUNC(hadamard8_diff)
1035  c->hadamard8_diff[4] = hadamard8_intra16_c;
1036  c->hadamard8_diff[5] = hadamard8_intra8x8_c;
1037  SET_CMP_FUNC(dct_sad)
1038  SET_CMP_FUNC(dct_max)
1039 #if CONFIG_GPL
1040  SET_CMP_FUNC(dct264_sad)
1041 #endif
1042  c->sad[0] = pix_abs16_c;
1043  c->sad[1] = pix_abs8_c;
1044  c->sse[0] = sse16_c;
1045  c->sse[1] = sse8_c;
1046  c->sse[2] = sse4_c;
1047  SET_CMP_FUNC(quant_psnr)
1048  SET_CMP_FUNC(rd)
1049  SET_CMP_FUNC(bit)
1050  c->vsad[0] = vsad16_c;
1051  c->vsad[1] = vsad8_c;
1052  c->vsad[4] = vsad_intra16_c;
1053  c->vsad[5] = vsad_intra8_c;
1054  c->vsse[0] = vsse16_c;
1055  c->vsse[1] = vsse8_c;
1056  c->vsse[4] = vsse_intra16_c;
1057  c->vsse[5] = vsse_intra8_c;
1058  c->nsse[0] = nsse16_c;
1059  c->nsse[1] = nsse8_c;
1060 #if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
1062 #endif
1063 
1064 #if ARCH_AARCH64
1065  ff_me_cmp_init_aarch64(c, avctx);
1066 #elif ARCH_ALPHA
1067  ff_me_cmp_init_alpha(c, avctx);
1068 #elif ARCH_ARM
1069  ff_me_cmp_init_arm(c, avctx);
1070 #elif ARCH_PPC
1071  ff_me_cmp_init_ppc(c, avctx);
1072 #elif ARCH_X86
1073  ff_me_cmp_init_x86(c, avctx);
1074 #elif ARCH_MIPS
1075  ff_me_cmp_init_mips(c, avctx);
1076 #endif
1077 
1078  c->median_sad[0] = pix_median_abs16_c;
1079  c->median_sad[1] = pix_median_abs8_c;
1080 }
level
uint8_t level
Definition: svq3.c:206
mem_internal.h
src1
const pixel * src1
Definition: h264pred_template.c:421
mpegvideoenc.h
VSSE_INTRA
#define VSSE_INTRA(size)
Definition: me_cmp.c:948
b
#define b
Definition: input.c:34
avg2
#define avg2(a, b)
Definition: me_cmp.c:147
pix_median_abs8_c
static int pix_median_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:325
copy_block8
static void copy_block8(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:47
ff_me_cmp_init_x86
void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
BUTTERFLYA
#define BUTTERFLYA(x, y)
Definition: me_cmp.c:554
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
bit8x8_c
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h)
Definition: me_cmp.c:845
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:58
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:794
dummy
int dummy
Definition: motion.c:65
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1016
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
FF_CMP_SSE
#define FF_CMP_SSE
Definition: avcodec.h:786
sse4_c
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:70
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
FF_CMP_BIT
#define FF_CMP_BIT
Definition: avcodec.h:790
s
#define s(width, name)
Definition: cbs_vp9.c:256
s1
#define s1
Definition: regdef.h:38
bits
uint8_t bits
Definition: vp3data.h:141
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
nsse16_c
static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, ptrdiff_t stride, int h)
Definition: me_cmp.c:420
simple_idct.h
VSAD_INTRA
#define VSAD_INTRA(size)
Definition: me_cmp.c:906
cmp
static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, const int size, const int h, int ref_index, int src_index, me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags)
compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensa...
Definition: motion_est.c:259
rd8x8_c
static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h)
Definition: me_cmp.c:768
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
quant_psnr8x8_c
static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h)
Definition: me_cmp.c:743
MECmpContext
Definition: me_cmp.h:53
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:205
VSSE
#define VSSE(size)
Definition: me_cmp.c:970
pix_abs16_x2_c
static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:217
pix_median_abs16_c
static int pix_median_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:178
abs
#define abs(x)
Definition: cuda_runtime.h:35
FF_CMP_MEDIAN_SAD
#define FF_CMP_MEDIAN_SAD
Definition: avcodec.h:800
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_me_cmp_init_mips
void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp_init_mips.c:25
dct_sad8x8_c
static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h)
Definition: me_cmp.c:660
s2
#define s2
Definition: regdef.h:39
FF_CMP_PSNR
#define FF_CMP_PSNR
Definition: avcodec.h:789
FF_CMP_W53
#define FF_CMP_W53
Definition: avcodec.h:796
pix_abs8_x2_c
static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:356
ff_me_cmp_init_alpha
av_cold void ff_me_cmp_init_alpha(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp_alpha.c:305
FF_CMP_SATD
#define FF_CMP_SATD
Definition: avcodec.h:787
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FF_CMP_ZERO
#define FF_CMP_ZERO
Definition: avcodec.h:792
attributes.h
FF_CMP_SAD
#define FF_CMP_SAD
Definition: avcodec.h:785
BUTTERFLY1
#define BUTTERFLY1(x, y)
Definition: me_cmp.c:545
SRC
#define SRC(x, y)
Definition: h264pred_template.c:825
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
hadamard8_diff8x8_c
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h)
Definition: me_cmp.c:556
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
ff_me_cmp_init_aarch64
av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp_init_aarch64.c:31
ff_simple_idct_int16_8bit
void ff_simple_idct_int16_8bit(int16_t *block)
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
copy_block.h
FF_CMP_RD
#define FF_CMP_RD
Definition: avcodec.h:791
pix_abs16_y2_c
static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:245
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
src2
const pixel * src2
Definition: h264pred_template.c:422
sse16_c
static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:108
dct_max8x8_c
static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h)
Definition: me_cmp.c:726
sum_abs_dctelem_c
static int sum_abs_dctelem_c(int16_t *block)
Definition: me_cmp.c:138
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:795
dct
static void dct(AudioRNNContext *s, float *out, const float *in)
Definition: af_arnndn.c:1012
hadamard8_intra8x8_c
static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src, uint8_t *dummy, ptrdiff_t stride, int h)
Definition: me_cmp.c:608
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
mid_pred
#define mid_pred
Definition: mathops.h:97
pix_abs8_xy2_c
static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:398
me_cmp.h
AVCodecContext
main external API structure.
Definition: avcodec.h:389
WRAPPER8_16_SQ
#define WRAPPER8_16_SQ(name8, name16)
Definition: me_cmp.c:988
zero_cmp
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b, ptrdiff_t stride, int h)
Definition: me_cmp.c:470
FF_CMP_DCT
#define FF_CMP_DCT
Definition: avcodec.h:788
avg4
#define avg4(a, b, c, d)
Definition: me_cmp.c:148
temp
else temp
Definition: vf_mcdeint.c:248
pix_abs8_c
static int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:305
DCT8_1D
#define DCT8_1D(src, srcstride, dst, dststride)
Definition: h264dsp.c:95
ff_me_cmp_init_arm
av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp_init_arm.c:41
DST
#define DST(x, y)
Definition: vp9dsp_template.c:781
ff_me_cmp_init_ppc
av_cold void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:726
ff_dsputil_init_dwt
void ff_dsputil_init_dwt(MECmpContext *c)
Definition: snow_dwt.c:842
FF_CMP_DCTMAX
#define FF_CMP_DCTMAX
Definition: avcodec.h:798
FF_CMP_DCT264
#define FF_CMP_DCT264
Definition: avcodec.h:799
FF_CMP_VSAD
#define FF_CMP_VSAD
Definition: avcodec.h:793
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
me_cmp_func
int(* me_cmp_func)(struct MpegEncContext *c, uint8_t *blk1, uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:48
SET_CMP_FUNC
#define SET_CMP_FUNC(name)
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
V
#define V(x)
h
h
Definition: vp9dsp_template.c:2038
VSAD
#define VSAD(size)
Definition: me_cmp.c:928
sse8_c
static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:87
BUTTERFLY2
#define BUTTERFLY2(o1, o2, i1, i2)
Definition: me_cmp.c:541
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
pix_abs16_xy2_c
static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:275
pix_abs8_y2_c
static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:376
pix_abs16_c
static int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
Definition: me_cmp.c:150
FF_CMP_W97
#define FF_CMP_W97
Definition: avcodec.h:797
nsse8_c
static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, ptrdiff_t stride, int h)
Definition: me_cmp.c:445