Go to the documentation of this file.
/* Rounding/search offset for the basis-scale fitting; its use site is not
   visible in this fragment — TODO confirm against the full source. */
36 #define SCALE_OFFSET -1
/*
 * NOTE(review): Doxygen-scrape fragment. The __asm__ statement wrapper,
 * the loop label/branch (fused numbering jumps 51->54 and 68->72) and the
 * operand-constraint list are missing from this view, so the %n operand
 * mapping below is inferred — confirm against the real source file.
 *
 * Computes a weighted sum-of-squares error for fitting `basis` scaled by
 * `scale` onto the residual `rem`: per coefficient, roughly
 * weight * (((basis*scale, rounded high mul) + rem) >> 6), squared and
 * accumulated over all 64 int16 coefficients, 16 per iteration.
 */
40 static int try_8x8basis_ssse3(
const int16_t rem[64],
const int16_t
weight[64],
const int16_t
basis[64],
int scale)
/* xmm2 = dword accumulator, cleared; xmm3 = `scale` broadcast to 8 words */
48 "pxor %%xmm2, %%xmm2 \n\t"
49 "movd %4, %%xmm3 \n\t"
50 "punpcklwd %%xmm3, %%xmm3 \n\t"
51 "pshufd $0, %%xmm3, %%xmm3 \n\t"
/* loop body: %0 is the byte offset; %1/%2/%3 presumably basis/rem/weight
   respectively — verify against the (missing) constraint list */
54 "movdqa (%1, %0), %%xmm0 \n\t"
55 "movdqa 16(%1, %0), %%xmm1 \n\t"
/* pmulhrsw: rounded high 16-bit multiply, i.e. (x*scale*2 + 0x8000) >> 16 */
56 "pmulhrsw %%xmm3, %%xmm0 \n\t"
57 "pmulhrsw %%xmm3, %%xmm1 \n\t"
58 "paddw (%2, %0), %%xmm0 \n\t"
59 "paddw 16(%2, %0), %%xmm1 \n\t"
60 "psraw $6, %%xmm0 \n\t"
61 "psraw $6, %%xmm1 \n\t"
62 "pmullw (%3, %0), %%xmm0 \n\t"
63 "pmullw 16(%3, %0), %%xmm1 \n\t"
/* pmaddwd of a register with itself = pairwise sums of squares */
64 "pmaddwd %%xmm0, %%xmm0 \n\t"
65 "pmaddwd %%xmm1, %%xmm1 \n\t"
66 "paddd %%xmm1, %%xmm0 \n\t"
/* >>4 each iteration keeps the 32-bit accumulator lanes from overflowing */
67 "psrld $4, %%xmm0 \n\t"
68 "paddd %%xmm0, %%xmm2 \n\t"
/* horizontal add of the four dword lanes of xmm2, final >>2, move to %0 */
72 "pshufd $0x0E, %%xmm2, %%xmm0 \n\t"
73 "paddd %%xmm0, %%xmm2 \n\t"
74 "pshufd $0x01, %%xmm2, %%xmm0 \n\t"
75 "paddd %%xmm0, %%xmm2 \n\t"
76 "psrld $2, %%xmm2 \n\t"
77 "movd %%xmm2, %0 \n\t"
/*
 * NOTE(review): Doxygen-scrape fragment — the __asm__ wrapper, loop
 * label/branch (numbering jumps 94->97) and operand constraints are not
 * visible here; the %n mapping is inferred.
 *
 * Adds `basis` scaled by `scale` (SSSE3 pmulhrsw rounded high multiply)
 * into `rem` in place, 16 int16 coefficients per iteration.
 */
85 static void add_8x8basis_ssse3(int16_t rem[64],
const int16_t
basis[64],
int scale)
/* broadcast `scale` to all 8 word lanes of xmm2 */
92 "movd %3, %%xmm2 \n\t"
93 "punpcklwd %%xmm2, %%xmm2 \n\t"
94 "pshufd $0, %%xmm2, %%xmm2 \n\t"
/* %1 presumably basis, %2 rem (read-modify-write) — confirm operand list */
97 "movdqa (%1, %0), %%xmm0 \n\t"
98 "movdqa 16(%1, %0), %%xmm1 \n\t"
99 "pmulhrsw %%xmm2, %%xmm0 \n\t"
100 "pmulhrsw %%xmm2, %%xmm1 \n\t"
101 "paddw (%2, %0), %%xmm0 \n\t"
102 "paddw 16(%2, %0), %%xmm1 \n\t"
103 "movdqa %%xmm0, (%2, %0) \n\t"
104 "movdqa %%xmm1, 16(%2, %0) \n\t"
/* Scalar loop over all 64 coefficients of the 8x8 block; the loop body is
   missing from this fragment — presumably a C fallback path taken when the
   SIMD fixed-point scaling cannot be used. TODO confirm in full source. */
113 for (
i=0;
i<8*8;
i++) {
/*
 * NOTE(review): Doxygen-scrape fragment — the enclosing __asm__ statements,
 * loop control, pointer setup and the `sides` handling are missing from
 * this view (fused numbering jumps 142->149, 163->174, 182->195).
 *
 * Pads a picture plane with replicated edge pixels: the left/right margins
 * of each row get the row's first/last pixel replicated, and the top/bottom
 * margins are filled by copying the first/last line outward. `wrap` is the
 * line stride; `w`/`h` are the horizontal/vertical edge widths.
 */
122 static void draw_edges_mmx(uint8_t *buf, ptrdiff_t
wrap,
int width,
int height,
123 int w,
int h,
int sides)
125 uint8_t *ptr, *last_line;
/* 8-byte-margin case: mm0 = leftmost byte of the row splatted to 8 bytes,
   stored at -8(row); mm1 = rightmost byte splatted, stored just past the
   row end (%2 presumably the row width — operand list not visible) */
133 "movd (%0), %%mm0 \n\t"
134 "punpcklbw %%mm0, %%mm0 \n\t"
135 "punpcklwd %%mm0, %%mm0 \n\t"
136 "punpckldq %%mm0, %%mm0 \n\t"
137 "movq %%mm0, -8(%0) \n\t"
138 "movq -8(%0, %2), %%mm1 \n\t"
139 "punpckhbw %%mm1, %%mm1 \n\t"
140 "punpckhwd %%mm1, %%mm1 \n\t"
141 "punpckhdq %%mm1, %%mm1 \n\t"
142 "movq %%mm1, (%0, %2) \n\t"
149 }
else if (
w == 16) {
/* 16-byte-margin case: same splats, but each stored twice (two movq) to
   cover the wider margin */
152 "movd (%0), %%mm0 \n\t"
153 "punpcklbw %%mm0, %%mm0 \n\t"
154 "punpcklwd %%mm0, %%mm0 \n\t"
155 "punpckldq %%mm0, %%mm0 \n\t"
156 "movq %%mm0, -8(%0) \n\t"
157 "movq %%mm0, -16(%0) \n\t"
158 "movq -8(%0, %2), %%mm1 \n\t"
159 "punpckhbw %%mm1, %%mm1 \n\t"
160 "punpckhwd %%mm1, %%mm1 \n\t"
161 "punpckhdq %%mm1, %%mm1 \n\t"
162 "movq %%mm1, (%0, %2) \n\t"
163 "movq %%mm1, 8(%0, %2) \n\t"
/* narrow case: 4-byte margins via movd — presumably the remaining `else`
   branch (w == 4?); the branch header is missing from this fragment */
174 "movd (%0), %%mm0 \n\t"
175 "punpcklbw %%mm0, %%mm0 \n\t"
176 "punpcklwd %%mm0, %%mm0 \n\t"
177 "movd %%mm0, -4(%0) \n\t"
178 "movd -4(%0, %2), %%mm1 \n\t"
179 "punpcklbw %%mm1, %%mm1 \n\t"
180 "punpckhwd %%mm1, %%mm1 \n\t"
181 "punpckhdq %%mm1, %%mm1 \n\t"
182 "movd %%mm1, (%0, %2) \n\t"
/* vertical padding: two loops over h lines; the first loop's body
   (presumably the top-edge copy) is missing from this fragment */
195 for (
i = 0;
i <
h;
i++)
/* bottom edge: replicate `last_line` downward, one full padded row
   (width + 2*w bytes) per line */
199 for (
i = 0;
i <
h;
i++)
201 memcpy(last_line + (
i + 1) *
wrap, last_line,
width +
w +
w);
/* Fragment of ff_mpegvideoencdsp_init_x86(): install the MMX edge drawer,
   and the SSSE3 basis helpers when inline SSSE3 asm is available. The
   surrounding CPU-flag checks are missing from this view. */
224 c->draw_edges = draw_edges_mmx;
228 #if HAVE_SSSE3_INLINE
231 c->try_8x8basis = try_8x8basis_ssse3;
233 c->add_8x8basis = add_8x8basis_ssse3;
#define INLINE_MMX(flags)
static int16_t basis[64][64]
av_cold void ff_mpegvideoencdsp_init_x86(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
static atomic_int cpu_flags
int flags
AV_CODEC_FLAG_*.
const h264_weight_func weight
int ff_pix_norm1_sse2(const uint8_t *pix, ptrdiff_t line_size)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define EXTERNAL_XOP(flags)
Undefined Behavior: In the C language, some operations are undefined — like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing based on wrong assumptions can — and in some cases has — led to effects beyond the output of the computation. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c… [text truncated in extraction]
int ff_pix_sum16_xop(const uint8_t *pix, ptrdiff_t line_size)
#define EXTERNAL_SSE2(flags)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int ff_pix_sum16_sse2(const uint8_t *pix, ptrdiff_t line_size)
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
#define INLINE_SSSE3(flags)
main external API structure.
#define XMM_CLOBBERS_ONLY(...)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static void scale(int *out, const int *in, const int w, const int h, const int shift)