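/* The 4x4 wrapper macros below drop the src1stride argument: every caller
 * in this file passes src1stride == dststride (see H264_MC_V below), so the
 * four-argument asm entry points suffice. A function-like macro that shares
 * its name with the function it wraps does not expand recursively, so this
 * is well-defined C. The 8x8 sse2 names simply alias the mmxext code. */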
#define ff_put_pixels4x4_l2_mmxext(dst, src1, src2, dststride, src1stride) \
    ff_put_pixels4x4_l2_mmxext((dst), (src1), (src2), (dststride))
#define ff_avg_pixels4x4_l2_mmxext(dst, src1, src2, dststride, src1stride) \
    ff_avg_pixels4x4_l2_mmxext((dst), (src1), (src2), (dststride))
#define ff_put_pixels8x8_l2_sse2 ff_put_pixels8x8_l2_mmxext
#define ff_avg_pixels8x8_l2_sse2 ff_avg_pixels8x8_l2_mmxext
/* Prototypes of the asm lowpass primitives, declared once each for put/avg. */
#define DEF_QPEL(OPNAME)\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_mmxext(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_ssse3(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_l2_mmxext(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_sse2(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel16_h_lowpass_l2_sse2(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);\
void ff_ ## OPNAME ## _h264_qpel4_v_lowpass_mmxext(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_v_lowpass_sse2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h);\
void ff_ ## OPNAME ## _h264_qpel4_hv_lowpass_h_mmxext(int16_t *tmp, uint8_t *dst, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv1_lowpass_op_sse2(const uint8_t *src, int16_t *tmp, ptrdiff_t srcStride, int size);\
void ff_ ## OPNAME ## _h264_qpel8_hv2_lowpass_sse2(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel16_hv2_lowpass_sse2(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel8_hv2_lowpass_ssse3(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _h264_qpel16_hv2_lowpass_ssse3(uint8_t *dst, int16_t *tmp, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels4_l2_shift5_mmxext(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels8_l2_shift5_sse2(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);\
void ff_ ## OPNAME ## _pixels16_l2_shift5_sse2(uint8_t *dst, const int16_t *src16, const uint8_t *src8, ptrdiff_t dstStride);
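/* The listing omits the instantiations at this point; both the ff_put_*
 * and ff_avg_* symbols are referenced below, so presumably: */
DEF_QPEL(avg)
DEF_QPEL(put)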
#define QPEL_H264(OPNAME, MMX)\
static av_always_inline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    /* vertical 6-tap pass into the 16-bit tmp buffer, then the horizontal pass into dst */\
    src -= 2*srcStride+2;\
    ff_put_h264_qpel4_hv_lowpass_v_mmxext(src, tmp, srcStride);\
    ff_ ## OPNAME ## h264_qpel4_hv_lowpass_h_mmxext(tmp, dst, dstStride);\
}
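/* The 16-wide horizontal l2 pass below is tiled from four 8-wide calls:
 * two side by side, then the same pair again after stepping down eight
 * rows. The 8-wide l2 primitives take no separate source stride, so the
 * row step advances src by dstStride as well; the step lines are
 * reconstructed, and all callers in this file pass matching strides. */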
#define QPEL_H264_H16(OPNAME, EXT) \
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_l2_ ## EXT(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride)\
{\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst  , src  , src2  , dstStride, src2Stride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src  += 8*dstStride;  /* reconstructed row step, see note above */\
    dst  += 8*dstStride;\
    src2 += 8*src2Stride;\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst  , src  , src2  , dstStride, src2Stride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## EXT(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#if ARCH_X86_64
/* Presumably guarded on the architecture: on x86-64 the 16-wide ssse3
 * horizontal l2 filters are implemented directly in asm, so the wrapper
 * macro expands to nothing; 32-bit builds fall back to the C tiling above. */
#define QPEL_H264_H16_XMM(OPNAME, MMX)

void ff_avg_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);
void ff_put_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, ptrdiff_t dstStride, ptrdiff_t src2Stride);

#else
#define QPEL_H264_H16_XMM(OPNAME, EXT) QPEL_H264_H16(OPNAME, EXT)
#endif
#define QPEL_H264_H_XMM(OPNAME, MMX)\
QPEL_H264_H16_XMM(OPNAME, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;  /* reconstructed row step */\
    dst += 8*dstStride;\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}
#define QPEL_H264_V_XMM(OPNAME, XMM, XMM2)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_v_lowpass_ ## XMM(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst, src, dstStride, srcStride, 8);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## XMM(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst  , src  , dstStride, srcStride, 16);\
    ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## XMM2(dst+8, src+8, dstStride, srcStride, 16);\
}
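/* XMM2 names the instruction set of the shared vertical core: only an sse2
 * version of ff_*_h264_qpel8or16_v_lowpass is declared in DEF_QPEL above,
 * so an ssse3 instantiation of this macro presumably reuses the sse2 core. */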
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, const uint8_t *src, ptrdiff_t srcStride, int size)
{
    /* Loop reconstructed as an assumption: the asm op fills one 8-column
     * slice of the 16-bit intermediate per call, and the intermediate is
     * size+5 columns wide, so the slice count below is ceil((size+5)/8). */
    int w = (size + 8) >> 3;
    src -= 2*srcStride+2;
    while (w--) {
        ff_put_h264_qpel8or16_hv1_lowpass_op_sse2(src, tmp, srcStride, size);
        src += 8;
        tmp += 8;
    }
}
#define QPEL_H264_HV_XMM(OPNAME, MMX)\
static av_always_inline void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, srcStride, 8);\
    ff_ ## OPNAME ## h264_qpel8_hv2_lowpass_ ## MMX(dst, tmp, dstStride);\
}\
static av_always_inline void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, srcStride, 16);\
    ff_ ## OPNAME ## h264_qpel16_hv2_lowpass_ ## MMX(dst, tmp, dstStride);\
}
#define H264_MC_V_H_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT) \
H264_MC_V(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT)
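/* Naming: mcXY is the quarter-pel position, X horizontal and Y vertical in
 * quarter-sample units (mc20 = half-pel horizontal, mc22 = centre half-pel
 * in both axes, and so on). mc00, the plain copy, is installed separately
 * by SET_QPEL_FUNCS below. */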
#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN, UNUSED) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}
#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN, UNUSED) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## x ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## x ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride);\
}
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN, SHIFT5_EXT) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
    ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint16_t, temp, [SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV = temp;\
    int16_t * const halfV  = (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV = temp;\
    int16_t * const halfV  = (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
/* mc12/mc32: blend the 16-bit vertical intermediate halfV (normalized by >>5, since the 6-tap coefficients sum to 32) with the finished half-pel plane halfHV */\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV = temp;\
    int16_t * const halfV  = (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_ ## SHIFT5_EXT(dst, halfV+2, halfHV, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV = temp;\
    int16_t * const halfV  = (int16_t*)(temp + SIZE*SIZE);\
    av_assert2(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, stride);\
    ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_ ## SHIFT5_EXT(dst, halfV+3, halfHV, stride);\
}
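/* The two helpers below stamp out both operations from one position family:
 * H264_MC instantiates a family for put_ and avg_ at a given SIZE, and
 * H264_MC_816 does so for the 8- and 16-pixel sizes with 16-byte-aligned
 * temporaries. */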
#define H264_MC(QPEL, SIZE, MMX, ALIGN, SHIFT5_EXT)\
QPEL(put_, SIZE, MMX, ALIGN, SHIFT5_EXT) \
QPEL(avg_, SIZE, MMX, ALIGN, SHIFT5_EXT)
#define H264_MC_816(QPEL, XMM, SHIFT5_EXT)\
QPEL(put_, 8,  XMM, 16, SHIFT5_EXT)\
QPEL(put_, 16, XMM, 16, SHIFT5_EXT)\
QPEL(avg_, 8,  XMM, 16, SHIFT5_EXT)\
QPEL(avg_, 16, XMM, 16, SHIFT5_EXT)
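/* Illustrative instantiations (a sketch; the listing does not preserve the
 * file's actual expansion list or the extension names it uses): */
H264_MC(H264_MC_V_H_HV, 4, mmxext, 8, mmxext)
H264_MC_816(H264_MC_V_H_HV, sse2, sse2)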
#define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \
void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
#define LUMA_MC_4(DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT)
#define LUMA_MC_816(DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put, 8,  DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg, 8,  DEPTH, TYPE, OPT) \
    LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
    LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)
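/* Usage sketch: each invocation declares the put/avg pair of external 10-bit
 * asm prototypes for one position type. The "mc00", "mmxext" and "sse2"
 * tokens here are illustrative assumptions, not the file's actual list: */
LUMA_MC_4(10, mc00, mmxext)
LUMA_MC_816(10, mc00, sse2)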
#define SET_QPEL_FUNCS_1PP(PFX, IDX, SIZE, CPU, PREFIX) \
    do { \
        c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
    do { \
        c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
        SET_QPEL_FUNCS_1PP(PFX, IDX, SIZE, CPU, PREFIX); \
    } while (0)
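/* Typical registration (illustrative arguments): an empty PREFIX selects the
 * static wrappers defined above, while a PREFIX of ff_ would select asm entry
 * points directly, as the 10-bit path does:
 *     SET_QPEL_FUNCS(put_h264_qpel, 0, 16, sse2, );
 */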
#define H264_QPEL_FUNCS(x, y, CPU) \
    do { \
        c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
        c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \
        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \
    } while (0)
#define H264_QPEL_FUNCS_10(x, y, CPU) \
    do { \
        c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
        c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
        c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
        c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
    } while (0)
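/* Position indices map the (x, y) quarter-pel offsets into the 16-entry
 * table as x + y * 4. An illustrative registration (hedged; the real calls
 * in the init function pick positions per CPU tier):
 *     H264_QPEL_FUNCS(2, 2, sse2);
 */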
/* Only fragments of ff_h264qpel_init_x86() survive in this listing: the
 * three 8-bit guards below, one per CPU tier, matching the EXTERNAL_*
 * checks used in this file. The table assignments inside each tier are
 * elided; this skeleton is a reconstruction. */
av_cold void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth)
{
    int high_bit_depth = bit_depth > 8;
    int cpu_flags      = av_get_cpu_flags();

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        if (!high_bit_depth) { /* mmxext 8-bit qpel table entries set here */ }
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        if (!high_bit_depth) { /* sse2 8-bit qpel table entries set here */ }
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        if (!high_bit_depth) { /* ssse3 8-bit qpel table entries set here */ }
    }
}