/* put_tpel_pixels_mc00_c: no interpolation (mx = my = 0), plain block copy dispatched on block width */
switch (width) {
case 2:  put_pixels2_8_c (dst, src, stride, height); break;
case 4:  put_pixels4_8_c (dst, src, stride, height); break;
case 8:  put_pixels8_8_c (dst, src, stride, height); break;
case 16: put_pixels16_8_c(dst, src, stride, height); break;
}
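The put_pixelsN_8_c helpers are the shared 8-bit block-copy primitives; the switch exists because these kernels are called with 2-, 4-, 8- and 16-pixel-wide blocks. As an illustration of the copy semantics only (the real primitives live elsewhere and are more heavily optimized), the 2-pixel variant behaves like:

#include <stdint.h>

/* Sketch of the copy primitive's behavior, not the actual implementation:
 * copy a 2-pixel-wide, height-row block between strided buffers. */
static void put_pixels2_8_sketch(uint8_t *dst, const uint8_t *src,
                                 int stride, int height)
{
    for (int i = 0; i < height; i++) {
        dst[0] = src[0];
        dst[1] = src[1];
        dst += stride;
        src += stride;
    }
}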
/* put_tpel_pixels_mc10_c: interpolate at (1/3, 0). Each output pixel is
 * (2*left + right + 1) / 3; the division by 3 is done as a multiply by
 * 683 followed by a shift by 11 (683 / 2048 ~= 1/3) */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((2 * src[j] + src[j + 1] + 1) * 683) >> 11;
    src += stride;
    dst += stride;
}
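The multiply-shift is exact here, not just an approximation: for every sum that can occur with 8-bit pixels (at most 2*255 + 255 + 1 = 766), (s * 683) >> 11 equals s / 3. A standalone brute-force check (not part of the file) confirms this:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* The 2-tap kernels feed at most 2*255 + 255 + 1 = 766 into the
     * multiply, so checking that range covers every reachable input. */
    for (int s = 0; s <= 766; s++)
        assert(((s * 683) >> 11) == s / 3);
    printf("(s * 683) >> 11 == s / 3 for all s in [0, 766]\n");
    return 0;
}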
/* put_tpel_pixels_mc20_c: interpolate at (2/3, 0); (left + 2*right + 1) / 3 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((src[j] + 2 * src[j + 1] + 1) * 683) >> 11;
    src += stride;
    dst += stride;
}
/* put_tpel_pixels_mc01_c: interpolate at (0, 1/3); (2*top + bottom + 1) / 3 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((2 * src[j] + src[j + stride] + 1) * 683) >> 11;
    src += stride;
    dst += stride;
}
/* put_tpel_pixels_mc11_c: interpolate at (1/3, 1/3). The four neighbours
 * are weighted 4:3:3:2 (sum 12, +6 is the rounding term); the division
 * by 12 is a multiply by 2731 and a shift by 15 (2731 / 32768 ~= 1/12) */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((4 * src[j] + 3 * src[j + 1] +
                   3 * src[j + stride] + 2 * src[j + stride + 1] + 6) * 2731) >> 15;
    src += stride;
    dst += stride;
}
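The same reciprocal trick works for the four-tap kernels: the weighted sum plus the rounding term is at most 12*255 + 6 = 3066, and over that range (s * 2731) >> 15 equals s / 12 exactly. A quick standalone check in the same spirit as the one above:

#include <assert.h>

int main(void)
{
    /* The 4-tap kernels feed at most 12*255 + 6 = 3066 into the multiply. */
    for (int s = 0; s <= 3066; s++)
        assert(((s * 2731) >> 15) == s / 12);
    return 0;
}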
/* put_tpel_pixels_mc12_c: interpolate at (1/3, 2/3); weights 3:2:4:3 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((3 * src[j] + 2 * src[j + 1] +
                   4 * src[j + stride] + 3 * src[j + stride + 1] + 6) * 2731) >> 15;
    src += stride;
    dst += stride;
}
/* put_tpel_pixels_mc02_c: interpolate at (0, 2/3); (top + 2*bottom + 1) / 3 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((src[j] + 2 * src[j + stride] + 1) * 683) >> 11;
    src += stride;
    dst += stride;
}
/* put_tpel_pixels_mc21_c: interpolate at (2/3, 1/3); weights 3:4:2:3 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((3 * src[j] + 4 * src[j + 1] +
                   2 * src[j + stride] + 3 * src[j + stride + 1] + 6) * 2731) >> 15;
    src += stride;
    dst += stride;
}
/* put_tpel_pixels_mc22_c: interpolate at (2/3, 2/3); weights 2:3:3:4 */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = ((2 * src[j] + 3 * src[j + 1] +
                   3 * src[j + stride] + 4 * src[j + stride + 1] + 6) * 2731) >> 15;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc00_c: average the unfiltered source block into dst, again dispatched on block width */
switch (width) {
case 2:  avg_pixels2_8_c (dst, src, stride, height); break;
case 4:  avg_pixels4_8_c (dst, src, stride, height); break;
case 8:  avg_pixels8_8_c (dst, src, stride, height); break;
case 16: avg_pixels16_8_c(dst, src, stride, height); break;
}
/* avg_tpel_pixels_mc10_c: same (1/3, 0) interpolation as the put version,
 * followed by a round-up average with the pixels already in dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((2 * src[j] + src[j + 1] + 1) * 683) >> 11) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc20_c: (2/3, 0), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((src[j] + 2 * src[j + 1] + 1) * 683) >> 11) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc01_c: (0, 1/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((2 * src[j] + src[j + stride] + 1) * 683) >> 11) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc11_c: (1/3, 1/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((4 * src[j] + 3 * src[j + 1] +
                     3 * src[j + stride] + 2 * src[j + stride + 1] + 6) *
                    2731) >> 15) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc12_c: (1/3, 2/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((3 * src[j] + 2 * src[j + 1] +
                     4 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
                    2731) >> 15) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc02_c: (0, 2/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((src[j] + 2 * src[j + stride] + 1) * 683) >> 11) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc21_c: (2/3, 1/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((3 * src[j] + 4 * src[j + 1] +
                     2 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
                    2731) >> 15) + 1) >> 1;
    src += stride;
    dst += stride;
}
/* avg_tpel_pixels_mc22_c: (2/3, 2/3), then round-up average with dst */
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++)
        dst[j] = (dst[j] +
                  (((2 * src[j] + 3 * src[j + 1] +
                     3 * src[j + stride] + 4 * src[j + stride + 1] + 6) *
                    2731) >> 15) + 1) >> 1;
    src += stride;
    dst += stride;
}
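All avg kernels share the same tail: the freshly interpolated value is combined with what is already in dst using (a + b + 1) >> 1, the round-up average named in the file description. A minimal sketch of that combining step on its own:

#include <stdint.h>

/* Round-up average of two 8-bit values, as used by the avg_* kernels:
 * a .5 result rounds up, e.g. rnd_avg(1, 2) == 2. */
static inline uint8_t rnd_avg(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);
}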
Thirdpel motion compensation with rounding: (a + b + 1) >> 1.

TpelDSPContext:
    tpel_mc_func put_tpel_pixels_tab[11]
    tpel_mc_func avg_tpel_pixels_tab[11]

Functions:
    static void put_tpel_pixels_mcXY_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height)
    static void avg_tpel_pixels_mcXY_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height)
        one put and one avg kernel for each fractional position XY in {00, 10, 20, 01, 11, 21, 02, 12, 22},
        where X and Y are the horizontal and vertical offsets in thirds of a pixel
    av_cold void ff_tpeldsp_init(TpelDSPContext *c)
        fills the two tables with the C implementations above (av_cold is a function attribute macro from
        libavutil/attributes.h)
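With the tables filled in, a decoder selects a kernel from the fractional motion-vector components. The sketch below assumes the tables are indexed as (my << 2) | mx, which is what the mcXY naming and the 11-entry table size suggest (indices 3 and 7 unused); the helper name mc_block is hypothetical, not from the file:

#include <stdint.h>
#include "tpeldsp.h"

/* Hypothetical dispatch helper: mx, my are the fractional motion-vector
 * components in thirds of a pixel, each in {0, 1, 2}. The (my << 2) | mx
 * index layout is an assumption based on the mcXY names. */
static void mc_block(const TpelDSPContext *dsp, uint8_t *dst,
                     const uint8_t *src, int stride,
                     int width, int height, int mx, int my, int avg)
{
    const tpel_mc_func *tab = avg ? dsp->avg_tpel_pixels_tab
                                  : dsp->put_tpel_pixels_tab;
    tab[(my << 2) | mx](dst, src, stride, width, height);
}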
Undefined behavior: in the C language some operations are undefined, such as signed integer overflow, dereferencing freed pointers, or reads outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing based on wrong assumptions can, and in some cases has, led to effects beyond the output of the computation. The signed integer overflow problem in speed-critical code: code that is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not fit in the signed integer it is stored in.
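A minimal sketch of the usual remedy, assuming a two's-complement target: do the arithmetic on unsigned types, where wraparound is defined, and convert back at the end (the conversion back to signed is implementation-defined rather than undefined, and behaves as two's-complement wrapping on such targets):

#include <stdint.h>

/* Multiply two 32-bit signed values with defined wraparound instead of
 * undefined signed overflow: widen to unsigned, multiply modulo 2^32,
 * convert back. The helper name is illustrative, not from the file. */
static int32_t mul_wrap32(int32_t a, int32_t b)
{
    return (int32_t)((uint32_t)a * (uint32_t)b);
}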