#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "linux/lockdep.h"
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/fpu/xstate.h>
#include <asm/cpuid.h>
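/*
 * From xstate_required_size(): walk the extended state features set in
 * @xstate_bv and, using CPUID.0xD sub-leaves, accumulate the buffer size
 * each enabled feature needs (64-byte aligned in the compacted format).
 */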
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;

                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;

                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);

                        offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;

                        ret = max(ret, offset + eax);
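/*
 * From the SF() ("scattered feature") macro: advertise a scattered feature
 * bit only if the boot CPU actually has it.
 */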
        BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);          \
        (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);              \
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
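/*
 * cpuid_entry2_find(): look up a guest CPUID entry by function and, when
 * the leaf is indexed, by index.  Passing KVM_CPUID_INDEX_NOT_SIGNIFICANT
 * skips the index check.
 */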
        struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)

        struct kvm_cpuid_entry2 *e;

        lockdep_assert_irqs_enabled();

        for (i = 0; i < nent; i++) {

                if (e->function != function)

                if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)

        WARN_ON_ONCE(cpuid_function_is_indexed(function));
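/*
 * kvm_check_cpuid(): sanity check an incoming CPUID array, e.g. reject
 * guest virtual address widths other than 48, 57, or 0, and enable any
 * dynamic XFD-managed xfeatures the new CPUID exposes to the guest.
 */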
                           struct kvm_cpuid_entry2 *entries,

        struct kvm_cpuid_entry2 *best;

                int vaddr_bits = (best->eax & 0xff00) >> 8;

                if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)

        xfeatures = best->eax | ((u64)best->edx << 32);
        xfeatures &= XFEATURE_MASK_USER_DYNAMIC;

        return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
        struct kvm_cpuid_entry2 *orig;

        if (nent != vcpu->arch.cpuid_nent)

        for (i = 0; i < nent; i++) {
                orig = &vcpu->arch.cpuid_entries[i];
                if (e2[i].function != orig->function ||
                    e2[i].index != orig->index ||
                    e2[i].flags != orig->flags ||
                    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
                    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
        struct kvm_hypervisor_cpuid cpuid = {};
        struct kvm_cpuid_entry2 *entry;

        for_each_possible_hypervisor_cpuid_base(base) {

                signature[0] = entry->ebx;
                signature[1] = entry->ecx;
                signature[2] = entry->edx;

                if (!memcmp(signature, sig, sizeof(signature))) {

                        cpuid.limit = entry->eax;
                                                      struct kvm_cpuid_entry2 *entries, int nent)

        u32 base = vcpu->arch.kvm_cpuid.base;

                                              vcpu->arch.cpuid_nent);

        vcpu->arch.pv_cpuid.features = best->eax;
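/*
 * cpuid_get_supported_xcr0() and __kvm_update_cpuid_runtime(): the latter
 * refreshes guest CPUID bits that track runtime vCPU state, e.g. OSXSAVE,
 * the APIC bit from the APIC base MSR, OSPKE, the PV_UNHALT feature bit,
 * and the MWAIT bits derived from MSR_IA32_MISC_ENABLE.
 */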
        struct kvm_cpuid_entry2 *best;

        struct kvm_cpuid_entry2 *best;

                if (boot_cpu_has(X86_FEATURE_XSAVE))

                                   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);

        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)

            (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
#ifdef CONFIG_KVM_HYPERV
        struct kvm_cpuid_entry2 *entry;

        return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
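/*
 * kvm_vcpu_after_set_cpuid(): recompute state derived from the vCPU's
 * CPUID, e.g. reset the governed feature bitmap, decide whether 1GiB
 * pages may be exposed, cache the guest-supported XCR0, and recompute
 * the guest's reserved CR4 bits, then let vendor code react.
 */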
        struct kvm_cpuid_entry2 *best;

        bitmap_zero(vcpu->arch.governed_features.enabled,
                    KVM_MAX_NR_GOVERNED_FEATURES);

        allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :

        vcpu->arch.guest_supported_xcr0 =

        vcpu->arch.cr4_guest_rsvd_bits =

                                                     vcpu->arch.cpuid_nent));

        static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
        struct kvm_cpuid_entry2 *best;

        if (!best || best->eax < 0x80000008)

        return best->eax & 0xff;
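/*
 * kvm_set_cpuid(): commit a validated CPUID array to the vCPU, freeing
 * the previous entries and re-deriving dependent state.
 */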
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                         int nent)

#ifdef CONFIG_KVM_HYPERV

        kvfree(vcpu->arch.cpuid_entries);
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = nent;

#ifdef CONFIG_KVM_XEN
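/*
 * kvm_vcpu_ioctl_set_cpuid(): the legacy KVM_SET_CPUID ioctl; convert the
 * userspace kvm_cpuid_entry array into kvm_cpuid_entry2 records, zeroing
 * the fields the old format does not carry.
 */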
                             struct kvm_cpuid_entry __user *entries)

        struct kvm_cpuid_entry *e = NULL;
        struct kvm_cpuid_entry2 *e2 = NULL;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)

                e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));

                e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);

        for (i = 0; i < cpuid->nent; i++) {
                e2[i].function = e[i].function;
                e2[i].eax = e[i].eax;
                e2[i].ebx = e[i].ebx;
                e2[i].ecx = e[i].ecx;
                e2[i].edx = e[i].edx;

                e2[i].padding[0] = 0;
                e2[i].padding[1] = 0;
                e2[i].padding[2] = 0;
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)

        struct kvm_cpuid_entry2 *e2 = NULL;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)

        e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)

        if (cpuid->nent < vcpu->arch.cpuid_nent)

        if (copy_to_user(entries, vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))

        cpuid->nent = vcpu->arch.cpuid_nent;
        struct kvm_cpuid_entry2 entry;

                    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
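/*
 * kvm_cpu_cap_init_kvm_defined() seeds a KVM-only capability leaf
 * directly from @mask; kvm_cpu_cap_mask() restricts a hardware-defined
 * leaf to @mask and to what the raw host CPUID leaf (read above into
 * 'entry') actually reports.
 */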
static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)

        BUILD_BUG_ON(leaf < NCAPINTS);

        kvm_cpu_caps[leaf] = mask;

        BUILD_BUG_ON(leaf >= NCAPINTS);

        kvm_cpu_caps[leaf] &= mask;
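/*
 * kvm_set_cpu_caps(): compute the CPUID feature bits KVM can expose to
 * guests.  Start from the boot CPU's capability words, then mask each
 * leaf down to the features KVM knows how to virtualize.
 */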
#ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
        unsigned int f_xfd = F(XFD);
#else
        unsigned int f_gbpages = 0;
        unsigned int f_lm = 0;
        unsigned int f_xfd = 0;
#endif

        memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

        BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
                     sizeof(boot_cpu_data.x86_capability));

        memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
               sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
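/*
 * CPUID.0x1 ECX features advertised to the guest; each bare 0 below is a
 * reserved bit or a feature KVM deliberately does not advertise.
 */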
                F(XMM3) | F(PCLMULQDQ) | 0 |
                0 | F(SSSE3) | 0 | 0 |
                F(FMA) | F(CX16) | 0 | F(PDCM) |
                F(PCID) | 0 | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 | F(AES) | F(XSAVE) | 0 | F(AVX) |
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 | F(CLFLUSH) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
                F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
                F(RTM) | F(ZERO_FCS_FDS) | 0 | F(AVX512F) |
                F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
                F(CLFLUSHOPT) | F(CLWB) | 0 | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |

                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 |
                F(SGX_LC) | F(BUS_LOCK_DETECT)

        if (cpuid_ecx(7) & F(LA57))

        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
                F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
                F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)

        if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))

        if (boot_cpu_has(X86_FEATURE_STIBP))

        if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
                F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
                F(FZRM) | F(FSRS) | F(FSRC) |
                F(AMX_FP16) | F(AVX_IFMA) | F(LAM)

                F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |

                F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
                F(BHI_CTRL) | F(MCDT_NO)
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd

                SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
                F(LAHF_LM) | F(CMP_LEGACY) | 0 | 0 |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 | F(XOP) |
                0 | F(FMA4) | F(TBM) |
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 |
                F(NX) | 0 | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 | f_lm | F(3DNOWEXT) | F(3DNOW)
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |

        if (boot_cpu_has(X86_FEATURE_IBPB))

        if (boot_cpu_has(X86_FEATURE_IBRS))

        if (boot_cpu_has(X86_FEATURE_STIBP))

        if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))

        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))

        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !boot_cpu_has(X86_FEATURE_AMD_SSBD))
                0 | F(SEV) | 0 | F(SEV_ES) |

                F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 |
                F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 |

        if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))

        if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |

                     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
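/*
 * do_host_cpuid(): allocate the next output entry and fill it from the
 * host CPUID instruction.  Hypervisor leaves (0x40000000 range) are left
 * zeroed for __do_cpuid_func() to synthesize, and out-of-range
 * 0x80000000 leaves are not queried on the host.
 */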
                                              u32 function, u32 index)

        memset(entry, 0, sizeof(*entry));
        entry->function = function;
        entry->index = index;
        switch (function & 0xC0000000) {

                static int max_cpuid_80000000;

                if (!READ_ONCE(max_cpuid_80000000))
                        WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
                if (function > READ_ONCE(max_cpuid_80000000))

        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

        if (cpuid_function_is_indexed(function))
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
        struct kvm_cpuid_entry2 *entry;

        entry->function = func;

                entry->ecx = F(MOVBE);

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

                entry->ecx = F(RDPID);
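/*
 * __do_cpuid_func(): build KVM's reported CPUID entries leaf by leaf,
 * clamping the maximum basic/extended leaf and iterating sub-leaves
 * where the leaf is indexed.
 */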
        struct kvm_cpuid_entry2 *entry;

                entry->eax = min(entry->eax, 0x1fU);

                WARN_ON_ONCE((entry->eax & 0xff) > 1);

                for (i = 1; entry->eax & 0x1f; ++i) {

                max_idx = entry->eax = min(entry->eax, 2u);
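/*
 * Leaf 0xa (architectural PMU): report KVM's PMU capabilities from
 * kvm_pmu_cap, or all zeroes when the PMU is disabled or the host lacks
 * architectural perfmon.
 */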
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

        eax.split.num_counters = kvm_pmu_cap.num_counters_gp;

        eax.split.mask_length = kvm_pmu_cap.events_mask_len;
        edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
        edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

                edx.split.anythread_deprecated = 1;
        edx.split.reserved1 = 0;
        edx.split.reserved2 = 0;

        entry->eax = eax.full;

        entry->edx = edx.full;
        entry->eax = entry->ebx = entry->ecx = 0;

        entry->eax &= permitted_xcr0;

        entry->ecx = entry->ebx;
        entry->edx &= permitted_xcr0 >> 32;
        if (!permitted_xcr0)

        if (entry->eax & (F(XSAVES)|F(XSAVEC)))

        WARN_ON_ONCE(permitted_xss != 0);

        entry->ecx &= permitted_xss;
        entry->edx &= permitted_xss >> 32;

        for (i = 2; i < 64; ++i) {

                if (permitted_xcr0 & BIT_ULL(i))

                else if (permitted_xss & BIT_ULL(i))

        if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {

        entry->ecx &= ~BIT_ULL(2);
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

                entry->ebx &= SGX_MISC_EXINFO;

                entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;

                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {

                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {

                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
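/*
 * KVM's own hypervisor leaves: KVM_CPUID_SIGNATURE advertises the
 * KVM_SIGNATURE string and the top paravirt leaf, and KVM_CPUID_FEATURES
 * advertises the paravirtual features KVM implements.
 */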
        case KVM_CPUID_SIGNATURE: {
                const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];

        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
                             (1 << KVM_FEATURE_PV_TLB_FLUSH) |
                             (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                             (1 << KVM_FEATURE_PV_SEND_IPI) |
                             (1 << KVM_FEATURE_POLL_CONTROL) |
                             (1 << KVM_FEATURE_PV_SCHED_YIELD) |
                             (1 << KVM_FEATURE_ASYNC_PF_INT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
        entry->eax = min(entry->eax, 0x80000022);

        if (entry->eax >= 0x8000001d &&
            (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
             || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
                entry->eax = max(entry->eax, 0x80000021);

        entry->ebx &= ~GENMASK(27, 16);

        entry->edx &= ~GENMASK(17, 16);

        entry->edx &= boot_cpu_data.x86_power;
        entry->eax = entry->ebx = entry->ecx = 0;

        unsigned g_phys_as = (entry->eax >> 16) & 0xff;
        unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
        unsigned phys_as = entry->eax & 0xff;

                g_phys_as = boot_cpu_data.x86_phys_bits;
        else if (!g_phys_as)
                g_phys_as = phys_as;

        entry->eax = g_phys_as | (virt_as << 8);
        entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));

        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

        entry->ecx = entry->edx = 0;

        entry->eax &= GENMASK(2, 0);
        entry->ebx = entry->ecx = entry->edx = 0;

        entry->eax = entry->ebx = entry->ecx = 0;

        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

        entry->ebx &= ~GENMASK(31, 12);

        entry->ebx &= ~GENMASK(11, 6);

        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

        entry->ebx = entry->ecx = entry->edx = 0;

        union cpuid_0x80000022_ebx ebx;

        entry->ecx = entry->edx = 0;

        entry->eax = entry->ebx;

        ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;

        ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;

        ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

        entry->ebx = ebx.full;
        entry->eax = min(entry->eax, 0xC0000004);

        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;

        if (type == KVM_GET_EMULATED_CPUID)

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

            boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)

        for (func = func + 1; func <= limit; ++func) {
                                 __u32 num_entries, unsigned int ioctl_type)

        if (ioctl_type != KVM_GET_EMULATED_CPUID)

        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))

                if (pad[0] || pad[1] || pad[2])
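/*
 * kvm_dev_ioctl_get_cpuid(): back KVM_GET_SUPPORTED_CPUID and
 * KVM_GET_EMULATED_CPUID; build the supported leaves into a temporary
 * array and copy the result back to userspace.
 */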
                            struct kvm_cpuid_entry2 __user *entries,

        static const u32 funcs[] = {

        if (cpuid->nent < 1)

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);

        for (i = 0; i < ARRAY_SIZE(funcs); i++) {

        cpuid->nent = array.nent;

                         array.nent * sizeof(struct kvm_cpuid_entry2)))
                                                    u32 function, u32 index)
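/*
 * get_out_of_range_cpuid_entry(): when the guest queries a leaf beyond
 * the maximum advertised for its class (basic, hypervisor, extended, or
 * Centaur), fall back to the guest's maximum basic leaf, matching
 * architectural CPUID behavior.
 */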
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)

        struct kvm_cpuid_entry2 *basic, *class;
        u32 function = *fn_ptr;

        if (function >= 0x40000000 && function <= 0x4fffffff)

        else if (function >= 0xc0000000)

        if (class && function <= class->eax)

        *fn_ptr = basic->eax;
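/*
 * kvm_cpuid(): the main CPUID emulation path.  Look up the exact entry,
 * optionally fall back to the out-of-range behavior, apply runtime
 * adjustments (TSX_CTRL clearing RTM/HLE, Hyper-V suppressing
 * CONSTANT_TSC), and special-case undefined sub-leaves of 0xb/0x1f.
 */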
               u32 *ecx, u32 *edx, bool exact_only)

        u32 orig_function = *eax, function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *entry;
        bool exact, used_max_basic = false;

        if (!entry && !exact_only) {

                used_max_basic = !!entry;

                if (function == 7 && index == 0) {

                            (data & TSX_CTRL_CPUID_CLEAR))
                                *ebx &= ~(F(RTM) | F(HLE));
                } else if (function == 0x80000007) {

                                *edx &= ~SF(CONSTANT_TSC);

                *eax = *ebx = *ecx = *edx = 0;

                if (function == 0xb || function == 0x1f) {

                                *ecx = index & 0xff;

        trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
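/*
 * kvm_emulate_cpuid(): handle a CPUID exit by reading RAX/RCX, running
 * kvm_cpuid(), and writing the results back to RAX/RBX/RCX/RDX.
 */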
        u32 eax, ebx, ecx, edx;

        eax = kvm_rax_read(vcpu);
        ecx = kvm_rcx_read(vcpu);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
        kvm_rax_write(vcpu, eax);
        kvm_rbx_write(vcpu, ebx);
        kvm_rcx_write(vcpu, ecx);
        kvm_rdx_write(vcpu, edx);