#include <linux/module.h>
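
/* kvm_update_cpuid(): refresh the OSXSAVE bit in leaf 1 when the host has XSAVE */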
	if (cpu_has_xsave && best->function == 0x1) {
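
/* read the host EFER MSR and report whether NX is enabled */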
static int is_efer_nx(void)
{
	unsigned long long efer = 0;
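
/* cpuid_fix_nx_cap(): find the guest's 0x80000001 leaf, if any */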
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
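
	/* CPUID.80000001H:EDX bit 20 advertises NX; hide it when the host EFER lacks NX */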
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
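
/* kvm_vcpu_ioctl_set_cpuid(): widen legacy struct kvm_cpuid_entry into kvm_cpuid_entry2, zeroing the fields the old ABI lacks */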
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	vfree(cpuid_entries);
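
/* kvm_vcpu_ioctl_set_cpuid2(): entries arrive already in the new format, so only the count needs recording */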
	vcpu->arch.cpuid_nent = cpuid->nent;
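
/* kvm_vcpu_ioctl_get_cpuid2(): refuse a user buffer smaller than the stored table, and report the real count back */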
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
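
/* mask a guest feature word with the host's x86_capability word of the same index */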
static void cpuid_mask(u32 *word, int wordnum)
static bool supported_xcr0_bit(unsigned bit)
#define F(x) bit(X86_FEATURE_##x)
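
/* in do_cpuid_ent(): feature bits that depend on host/kernel support */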
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
	unsigned f_lm = F(LM);
	unsigned f_gbpages = 0;
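
	/* feature words KVM is willing to expose; literal zeros mark bits never passed through */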
	const u32 kvm_supported_word0_x86_features =
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |

	const u32 kvm_supported_word1_x86_features =
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);

	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |

	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |

	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM);
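
/* do_cpuid_ent(): stop when the caller's buffer is full, read the leaf, then fix it up per function below */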
	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
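		/* x2apic is emulated in software, so advertise it regardless of host support */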
		entry->ecx |= F(X2APIC);
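
	/* leaf 2 is stateful: the low byte of EAX gives the number of times to repeat it */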
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
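
	/* leaf 4: read more cache entries until cache_type is zero */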
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			do_cpuid_1_ent(&entry[i], function, i);
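
	/* leaf 7, subleaf 0: structured extended feature flags in EBX (word 9) */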
		entry->ebx &= kvm_supported_word9_x86_features;
		cpuid_mask(&entry->ebx, 9);
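
	/* leaf 0xA: architectural performance monitoring, sized from the host PMU */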
			memset(&cap, 0, sizeof(cap));
		edx.split.reserved = 0;
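
	/* leaf 0xB: read more topology entries until level_type is zero */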
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			do_cpuid_1_ent(&entry[i], function, i);
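
	/* leaf 0xD: one subleaf per XCR0 bit the host and KVM both support */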
		for (idx = 1, i = 1; idx < 64; ++idx) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
				continue;
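
	/* KVM_CPUID_SIGNATURE: advertise the "KVMKVMKVM" hypervisor signature in EBX/ECX/EDX */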
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
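
	/* case 0x80000000: clamp the highest extended leaf KVM reports */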
		entry->eax = min(entry->eax, 0x8000001a);
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
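
	/* case 0x80000008: address sizes; guest virtual is raised to at least 48 bits */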
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		entry->ecx = entry->edx = 0;
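
	/* Centaur (VIA) leaves: only up to 0xC0000004 is supported */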
		entry->eax = min(entry->eax, 0xC0000004);
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
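
/* kvm_dev_ioctl_get_supported_cpuid(): base ranges to enumerate; the Centaur range is gated by a qualifier callback */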
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
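
	/* emit the first leaf of each range, then every following leaf up to the limit it reports in EAX */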
		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				 &nent, cpuid->nent);
		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent &&
		     r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
					 &nent, cpuid->nent);
	vfree(cpuid_entries);
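
/* stateful leaves (e.g. leaf 2): hand the KVM_CPUID_FLAG_STATE_READ_NEXT marker to the next entry with the same function */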
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
	int j, nent = vcpu->arch.cpuid_nent;
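	/* when no other entry with the same function exists, entry i reselects itself */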
	for (j = i + 1; ; j = (j + 1) % nent) {
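
/* kvm_find_cpuid_entry(): linear scan of the vcpu's CPUID table */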
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			move_to_next_stateful_cpuid_entry(vcpu, i);
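
/* cpuid_maxphyaddr(): MAXPHYADDR is the low byte of CPUID.80000008H:EAX */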
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
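
/* check_cpuid_limit(): only leaves above the reported maximum need the fallback treatment */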
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
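
/* kvm_cpuid(): fall back when the guest queries an out-of-range leaf */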
	if (!best)
		best = check_cpuid_limit(vcpu, function, index);
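
/* kvm_emulate_cpuid() traces the values it writes back to the guest registers */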
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, eax, ebx, ecx, edx);