FFmpeg
cpu.c
1 /*
2  * CPU detection code, extracted from mmx.h
3  * (c)1997-99 by H. Dietz and R. Fisher
4  * Converted to C and improved by Fabrice Bellard.
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <stdlib.h>
24 #include <string.h>
25 
26 #include "libavutil/x86/asm.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/cpu_internal.h"
30 
31 #if HAVE_X86ASM
32 
33 #define cpuid(index, eax, ebx, ecx, edx) \
34  ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)
35 
36 #define xgetbv(index, eax, edx) \
37  ff_cpu_xgetbv(index, &eax, &edx)
38 
39 #elif HAVE_INLINE_ASM
40 
41 /* ebx saving is necessary for PIC. gcc seems unable to see it alone */
42 #define cpuid(index, eax, ebx, ecx, edx) \
43  __asm__ volatile ( \
44  "mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \
45  "cpuid \n\t" \
46  "xchg %%"FF_REG_b", %%"FF_REG_S \
47  : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
48  : "0" (index), "2"(0))
49 
50 #define xgetbv(index, eax, edx) \
51  __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
52 
53 #define get_eflags(x) \
54  __asm__ volatile ("pushfl \n" \
55  "pop %0 \n" \
56  : "=r"(x))
57 
58 #define set_eflags(x) \
59  __asm__ volatile ("push %0 \n" \
60  "popfl \n" \
61  :: "r"(x))
62 
63 #endif /* HAVE_INLINE_ASM */
64 
65 #if ARCH_X86_64
66 
67 #define cpuid_test() 1
68 
69 #elif HAVE_X86ASM
70 
71 #define cpuid_test ff_cpu_cpuid_test
72 
73 #elif HAVE_INLINE_ASM
74 
75 static int cpuid_test(void)
76 {
77  x86_reg a, c;
78 
79  /* Check if CPUID is supported by attempting to toggle the ID bit in
80  * the EFLAGS register. */
81  get_eflags(a);
82  set_eflags(a ^ 0x200000);
83  get_eflags(c);
84 
85  return a != c;
86 }
87 #endif
88 
89 /* Function to test if multimedia instructions are supported... */
90 int ff_get_cpu_flags_x86(void)
91 {
92  int rval = 0;
93 
94 #ifdef cpuid
95 
96  int eax, ebx, ecx, edx;
97  int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
98  int family = 0, model = 0;
99  union { int i[3]; char c[12]; } vendor;
100  int xcr0_lo = 0, xcr0_hi = 0;
101 
102  if (!cpuid_test())
103  return 0; /* CPUID not supported */
104 
105  cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);
106 
107  if (max_std_level >= 1) {
108  cpuid(1, eax, ebx, ecx, std_caps);
109  family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
110  model = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
111  if (std_caps & (1 << 15))
112  rval |= AV_CPU_FLAG_CMOV;
113  if (std_caps & (1 << 23))
114  rval |= AV_CPU_FLAG_MMX;
115  if (std_caps & (1 << 25))
116  rval |= AV_CPU_FLAG_MMXEXT;
117 #if HAVE_SSE
118  if (std_caps & (1 << 25))
119  rval |= AV_CPU_FLAG_SSE;
120  if (std_caps & (1 << 26))
121  rval |= AV_CPU_FLAG_SSE2;
122  if (ecx & 1)
123  rval |= AV_CPU_FLAG_SSE3;
124  if (ecx & 0x2)
125  rval |= AV_CPU_FLAG_CLMUL;
126  if (ecx & 0x00000200 )
127  rval |= AV_CPU_FLAG_SSSE3;
128  if (ecx & 0x00080000 )
129  rval |= AV_CPU_FLAG_SSE4;
130  if (ecx & 0x00100000 )
131  rval |= AV_CPU_FLAG_SSE42;
132  if (ecx & 0x02000000 )
133  rval |= AV_CPU_FLAG_AESNI;
134 #if HAVE_AVX
135  /* Check OSXSAVE and AVX bits */
136  if ((ecx & 0x18000000) == 0x18000000) {
137  /* Check for OS support */
138  xgetbv(0, xcr0_lo, xcr0_hi);
139  if ((xcr0_lo & 0x6) == 0x6) {
140  rval |= AV_CPU_FLAG_AVX;
141  if (ecx & 0x00001000)
142  rval |= AV_CPU_FLAG_FMA3;
143  }
144  }
145 #endif /* HAVE_AVX */
146 #endif /* HAVE_SSE */
147  }
148  if (max_std_level >= 7) {
149  cpuid(7, eax, ebx, ecx, edx);
150 #if HAVE_AVX2
151  if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
152  rval |= AV_CPU_FLAG_AVX2;
153 #if HAVE_AVX512 /* F, CD, BW, DQ, VL */
154  if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
155  if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
156  rval |= AV_CPU_FLAG_AVX512;
157 #if HAVE_AVX512ICL
158  if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
159  rval |= AV_CPU_FLAG_AVX512ICL;
160 #endif /* HAVE_AVX512ICL */
161  }
162  }
163 #endif /* HAVE_AVX512 */
164 #endif /* HAVE_AVX2 */
165  /* BMI1/2 don't need OS support */
166  if (ebx & 0x00000008) {
167  rval |= AV_CPU_FLAG_BMI1;
168  if (ebx & 0x00000100)
169  rval |= AV_CPU_FLAG_BMI2;
170  }
171  }
172 
173  cpuid(0x80000000, max_ext_level, ebx, ecx, edx);
174 
175  if (max_ext_level >= 0x80000001) {
176  cpuid(0x80000001, eax, ebx, ecx, ext_caps);
177  if (ext_caps & (1U << 31))
178  rval |= AV_CPU_FLAG_3DNOW;
179  if (ext_caps & (1 << 30))
180  rval |= AV_CPU_FLAG_3DNOWEXT;
181  if (ext_caps & (1 << 23))
182  rval |= AV_CPU_FLAG_MMX;
183  if (ext_caps & (1 << 22))
184  rval |= AV_CPU_FLAG_MMXEXT;
185 
186  if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
187  /* Allow for selectively disabling SSE2 functions on AMD processors
188  with SSE2 support but not SSE4a. This includes Athlon64, some
189  Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
190  than SSE2 often enough to utilize this special-case flag.
191  AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
192  so that SSE2 is used unless explicitly disabled by checking
193  AV_CPU_FLAG_SSE2SLOW. */
194  if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
195  rval |= AV_CPU_FLAG_SSE2SLOW;
196 
197  /* Similar to the above but for AVX functions on AMD processors.
198  This is necessary only for functions using YMM registers on Bulldozer
199  and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
200  functions using XMM registers are always faster on them.
201  AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
202  used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
203  if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
204  rval |= AV_CPU_FLAG_AVXSLOW;
205 
206  /* Zen 3 and earlier have slow gather */
207  if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
208  rval |= AV_CPU_FLAG_SLOW_GATHER;
209  }
210 
211  /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
212  * used unless the OS has AVX support. */
213  if (rval & AV_CPU_FLAG_AVX) {
214  if (ecx & 0x00000800)
215  rval |= AV_CPU_FLAG_XOP;
216  if (ecx & 0x00010000)
217  rval |= AV_CPU_FLAG_FMA4;
218  }
219  }
220 
221  if (!strncmp(vendor.c, "GenuineIntel", 12)) {
222  if (family == 6 && (model == 9 || model == 13 || model == 14)) {
223  /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
224  * 6/14 (core1 "yonah") theoretically support sse2, but it's
225  * usually slower than mmx, so let's just pretend they don't.
226  * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
227  * enabled so that SSE2 is not used unless explicitly enabled
228  * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
229  * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
230  if (rval & AV_CPU_FLAG_SSE2)
231  rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
232  if (rval & AV_CPU_FLAG_SSE3)
233  rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
234  }
235  /* The Atom processor has SSSE3 support, which is useful in many cases,
236  * but sometimes the SSSE3 version is slower than the SSE2 equivalent
237  * on the Atom, but is generally faster on other processors supporting
238  * SSSE3. This flag allows for selectively disabling certain SSSE3
239  * functions on the Atom. */
240  if (family == 6 && model == 28)
241  rval |= AV_CPU_FLAG_ATOM;
242 
243  /* Conroe has a slow shuffle unit. Check the model number to ensure not
244  * to include crippled low-end Penryns and Nehalems that lack SSE4. */
245  if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
246  family == 6 && model < 23)
247  rval |= AV_CPU_FLAG_SSSE3SLOW;
248 
249  /* Ice Lake and below have slow gather due to Gather Data Sampling
250  * mitigation. */
251  if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 143)
252  rval |= AV_CPU_FLAG_SLOW_GATHER;
253  }
254 
255 #endif /* cpuid */
256 
257  return rval;
258 }
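Why the AVX branch above checks XCR0 as well as CPUID: CPUID.1:ECX bit 27 (OSXSAVE) says the OS has enabled XSAVE/XGETBV, bit 28 says the CPU implements AVX, and XGETBV(0) must then report that the OS actually saves both XMM and YMM state on context switch. A minimal standalone sketch of that logic follows; the helper name os_supports_avx and the fixed-width types are illustrative, not part of this file.

#include <stdint.h>

static int os_supports_avx(uint32_t ecx_leaf1, uint32_t xcr0_lo)
{
    if (!(ecx_leaf1 & (1u << 27)))  /* OSXSAVE: OS enabled XSAVE/XGETBV */
        return 0;
    if (!(ecx_leaf1 & (1u << 28)))  /* AVX: CPU implements the instructions */
        return 0;
    return (xcr0_lo & 0x6) == 0x6;  /* XCR0 bits 1-2: XMM and YMM state saved */
}

This mirrors the (ecx & 0x18000000) == 0x18000000 and (xcr0_lo & 0x6) == 0x6 tests in ff_get_cpu_flags_x86() above.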
259 
260 size_t ff_get_cpu_max_align_x86(void)
261 {
262  int flags = av_get_cpu_flags();
263 
264  if (flags & AV_CPU_FLAG_AVX512)
265  return 64;
266  if (flags & (AV_CPU_FLAG_AVX2 |
267  AV_CPU_FLAG_AVX |
268  AV_CPU_FLAG_XOP |
269  AV_CPU_FLAG_FMA4 |
270  AV_CPU_FLAG_FMA3 |
271  AV_CPU_FLAG_AVXSLOW))
272  return 32;
273  if (flags & (AV_CPU_FLAG_AESNI |
274  AV_CPU_FLAG_SSE42 |
275  AV_CPU_FLAG_SSE4 |
276  AV_CPU_FLAG_SSSE3 |
277  AV_CPU_FLAG_SSE3 |
278  AV_CPU_FLAG_SSE2 |
279  AV_CPU_FLAG_SSE |
280  AV_CPU_FLAG_ATOM |
281  AV_CPU_FLAG_SSE2SLOW |
282  AV_CPU_FLAG_SSE3SLOW |
283  AV_CPU_FLAG_SSSE3SLOW))
284  return 16;
285 
286  return 8;
287 }
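For context, a short usage sketch (not part of cpu.c) showing how callers typically consume these detection results through the public libavutil API; the printf messages and the choice of flags tested are illustrative only. av_get_cpu_flags() returns the detected flag mask, and av_force_cpu_flags() overrides it, which is handy for exercising fallback code paths in tests.

#include <stdio.h>
#include "libavutil/cpu.h"

int main(void)
{
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX2)
        printf("AVX2 code paths available\n");
    else if (flags & AV_CPU_FLAG_SSE2)
        printf("falling back to SSE2\n");

    /* Mask off an extension at runtime, e.g. to test the SSE2 fallback: */
    av_force_cpu_flags(flags & ~AV_CPU_FLAG_AVX2);
    return 0;
}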