2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/objtool.h>
5 #include <linux/percpu.h>
7 #include <asm/debugreg.h>
8 #include <asm/mmu_context.h>
27 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
33 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
34 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
35 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
36 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
37 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
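/*
 * KVM emulates the VMX preemption timer at a fixed rate of 2^5: guest timer
 * ticks are converted to TSC cycles by shifting left by this value (one tick
 * per 32 TSC cycles), matching what is advertised to L1 in IA32_VMX_MISC
 * bits [4:0].
 */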
39 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
48 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
49 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
56 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
63 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
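/*
 * Instantiating vmcs_shadow_fields.h with these macros builds the tables of
 * read-only and read/write VMCS fields that are mirrored into the shadow
 * VMCS, so L1's VMREAD/VMWRITE of those fields need not exit to L0.
 */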
83 pr_err("Missing field from shadow_read_only_field %x\n",
91 entry.offset += sizeof(u32);
104 pr_err("Missing field from shadow_read_write_field %x\n",
107 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
108 field <= GUEST_TR_AR_BYTES,
109 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
117 case GUEST_PML_INDEX:
121 case VMX_PREEMPTION_TIMER_VALUE:
125 case GUEST_INTR_STATUS:
139 entry.offset += sizeof(u32);
155 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
156 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
163 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
164 X86_EFLAGS_SF | X86_EFLAGS_OF))
170 u32 vm_instruction_error)
173 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
174 X86_EFLAGS_SF | X86_EFLAGS_OF))
206 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
207 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
217 return low | ((u64)high << 32);
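/*
 * VMX capability MSRs pack the allowed 0-settings in the low 32 bits and the
 * allowed 1-settings in the high 32 bits; this helper recombines the two
 * halves into the 64-bit MSR value reported to L1.
 */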
222 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
229 #ifdef CONFIG_KVM_HYPERV
230 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
235 vmx->nested.hv_evmcs = NULL;
241 hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
242 hv_vcpu->nested.vm_id = 0;
243 hv_vcpu->nested.vp_id = 0;
250 #ifdef CONFIG_KVM_HYPERV
262 if (!guest_cpuid_has_evmcs(vcpu) ||
289 dest->ds_sel = src->ds_sel;
290 dest->es_sel = src->es_sel;
316 vcpu->arch.regs_dirty = 0;
333 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
379 #define EPTP_PA_MASK GENMASK_ULL(51, 12)
383 return VALID_PAGE(root_hpa) &&
390 unsigned long roots = 0;
392 struct kvm_mmu_root_info *cached_root;
396 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
397 cached_root = &vcpu->arch.mmu->prev_roots[i];
401 roots |= KVM_MMU_ROOT_PREVIOUS(i);
416 vm_exit_reason = EXIT_REASON_PML_FULL;
421 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
423 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
455 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
461 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
466 vcpu->arch.mmu = &vcpu->arch.root_mmu;
467 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
473 bool inequality, bit;
479 return inequality ^ bit;
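/*
 * Per the SDM, a #PF in L2 is reflected to L1 when the truth of
 * (error_code & PFEC_MASK) == PFEC_MATCH equals the PF bit in L1's
 * exception bitmap; "inequality ^ bit" above evaluates exactly that.
 */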
494 if (vector == PF_VECTOR)
543 unsigned long *msr_bitmap_l0,
546 if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
547 vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
549 if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
550 vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
557 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
558 unsigned word = msr / BITS_PER_LONG;
561 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
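/*
 * For MSRs 0x800-0x8ff the read-intercept bits live in the low-MSR read
 * bitmap (first 1KB of the page) and the write-intercept bits 0x800 bytes
 * later in the low-MSR write bitmap, hence the word + 0x800/sizeof(long)
 * offset; setting every bit intercepts the whole x2APIC MSR range.
 */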
565 #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
567 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
568 unsigned long *msr_bitmap_l1, \
569 unsigned long *msr_bitmap_l0, u32 msr) \
571 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
572 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
573 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
575 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
581 unsigned long *msr_bitmap_l1,
582 unsigned long *msr_bitmap_l0,
586 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
589 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
602 unsigned long *msr_bitmap_l1;
622 if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap &&
623 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
630 msr_bitmap_l1 = (unsigned long *)map->hva;
647 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
648 unsigned word = msr / BITS_PER_LONG;
650 msr_bitmap_l0[word] = msr_bitmap_l1[word];
655 msr_bitmap_l1, msr_bitmap_l0,
661 msr_bitmap_l1, msr_bitmap_l0,
665 msr_bitmap_l1, msr_bitmap_l0,
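/*
 * The vmcs02 bitmap is built so that L2 may access an MSR directly only when
 * both L0 (vmcs01) and L1 (vmcs12) agree not to intercept it; everything
 * else stays intercepted and is handled or forwarded by L0.
 */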
746 VM_EXIT_ACK_INTR_ON_EXIT;
887 struct vmx_msr_entry *e)
890 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
892 if (CC(e->index == MSR_IA32_UCODE_WRITE) ||
893 CC(e->index == MSR_IA32_UCODE_REV))
895 if (CC(e->reserved != 0))
901 struct vmx_msr_entry *e)
903 if (CC(e->index == MSR_FS_BASE) ||
904 CC(e->index == MSR_GS_BASE) ||
905 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) ||
912 struct vmx_msr_entry *e)
914 if (CC(e->index == MSR_IA32_SMBASE) ||
926 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
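/*
 * IA32_VMX_MISC bits 27:25 advertise the recommended maximum number of MSR
 * list entries as (N + 1) * 512, which is the limit enforced on the
 * VM-entry/VM-exit MSR load and store lists below.
 */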
941 struct vmx_msr_entry e;
944 for (i = 0; i < count; i++) {
945 if (unlikely(i >= max_msr_list_size))
950 pr_debug_ratelimited(
951 "%s cannot read MSR entry (%u, 0x%08llx)\n",
952 __func__, i, gpa + i * sizeof(e));
956 pr_debug_ratelimited(
957 "%s check failed (%u, 0x%x, 0x%x)\n",
958 __func__, i, e.index, e.reserved);
962 pr_debug_ratelimited(
963 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
964 __func__, i, e.index, e.value);
985 if (msr_index == MSR_IA32_TSC) {
998 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
1006 struct vmx_msr_entry *e)
1009 gpa + i * sizeof(*e),
1010 e, 2 * sizeof(u32))) {
1011 pr_debug_ratelimited(
1012 "%s cannot read MSR entry (%u, 0x%08llx)\n",
1013 __func__, i, gpa + i * sizeof(*e));
1017 pr_debug_ratelimited(
1018 "%s check failed (%u, 0x%x, 0x%x)\n",
1019 __func__, i, e->index, e->reserved);
1029 struct vmx_msr_entry e;
1032 for (i = 0; i < count; i++) {
1033 if (unlikely(i >= max_msr_list_size))
1043 gpa + i * sizeof(e) +
1044 offsetof(struct vmx_msr_entry, value),
1045 &data, sizeof(data))) {
1046 pr_debug_ratelimited(
1047 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1048 __func__, i, e.index, data);
1060 struct vmx_msr_entry e;
1063 for (i = 0; i < count; i++) {
1067 if (e.index == msr_index)
1078 bool in_vmcs12_store_list;
1079 int msr_autostore_slot;
1080 bool in_autostore_list;
1084 in_autostore_list = msr_autostore_slot >= 0;
1087 if (in_vmcs12_store_list && !in_autostore_list) {
1096 pr_warn_ratelimited(
1097 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1101 last = autostore->nr++;
1102 autostore->val[last].index = msr_index;
1103 } else if (!in_vmcs12_store_list && in_autostore_list) {
1104 last = --autostore->nr;
1105 autostore->val[msr_autostore_slot] = autostore->val[last];
1116 bool nested_ept, bool reload_pdptrs,
1117 enum vm_entry_failure_code *entry_failure_code)
1120 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1130 *entry_failure_code = ENTRY_FAIL_PDPTE;
1134 vcpu->arch.cr3 = cr3;
1191 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1207 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1218 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1226 return (superset | subset) == superset;
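/*
 * "subset" is a bitwise subset of "superset" iff OR-ing it in changes
 * nothing; this is how userspace-supplied VMX capability MSR values are
 * checked against what KVM itself supports.
 */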
1231 const u64 feature_and_reserved =
1233 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1235 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1245 if (data & BIT_ULL(48))
1248 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1249 vmx_basic_vmcs_revision_id(data))
1252 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1260 u32 **low, u32 **high)
1262 switch (msr_index) {
1263 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1267 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1271 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1275 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1279 case MSR_IA32_VMX_PROCBASED_CTLS2:
1308 *highp = data >> 32;
1314 const u64 feature_and_reserved_bits =
1316 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1317 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1319 GENMASK_ULL(13, 9) | BIT_ULL(31);
1327 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1328 vmx_misc_preemption_timer_rate(data) !=
1329 vmx_misc_preemption_timer_rate(vmx_misc))
1332 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1335 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1338 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1363 switch (msr_index) {
1364 case MSR_IA32_VMX_CR0_FIXED0:
1366 case MSR_IA32_VMX_CR4_FIXED0:
1404 switch (msr_index) {
1405 case MSR_IA32_VMX_BASIC:
1407 case MSR_IA32_VMX_PINBASED_CTLS:
1408 case MSR_IA32_VMX_PROCBASED_CTLS:
1409 case MSR_IA32_VMX_EXIT_CTLS:
1410 case MSR_IA32_VMX_ENTRY_CTLS:
1421 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1422 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1423 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1424 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1425 case MSR_IA32_VMX_PROCBASED_CTLS2:
1427 case MSR_IA32_VMX_MISC:
1429 case MSR_IA32_VMX_CR0_FIXED0:
1430 case MSR_IA32_VMX_CR4_FIXED0:
1432 case MSR_IA32_VMX_CR0_FIXED1:
1433 case MSR_IA32_VMX_CR4_FIXED1:
1439 case MSR_IA32_VMX_EPT_VPID_CAP:
1441 case MSR_IA32_VMX_VMCS_ENUM:
1444 case MSR_IA32_VMX_VMFUNC:
1460 switch (msr_index) {
1461 case MSR_IA32_VMX_BASIC:
1462 *pdata = msrs->basic;
1464 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1465 case MSR_IA32_VMX_PINBASED_CTLS:
1469 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1470 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1472 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1473 case MSR_IA32_VMX_PROCBASED_CTLS:
1477 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1478 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1480 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1481 case MSR_IA32_VMX_EXIT_CTLS:
1485 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1486 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1488 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1489 case MSR_IA32_VMX_ENTRY_CTLS:
1493 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1494 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1496 case MSR_IA32_VMX_MISC:
1501 case MSR_IA32_VMX_CR0_FIXED0:
1504 case MSR_IA32_VMX_CR0_FIXED1:
1507 case MSR_IA32_VMX_CR4_FIXED0:
1510 case MSR_IA32_VMX_CR4_FIXED1:
1513 case MSR_IA32_VMX_VMCS_ENUM:
1516 case MSR_IA32_VMX_PROCBASED_CTLS2:
1521 case MSR_IA32_VMX_EPT_VPID_CAP:
1525 case MSR_IA32_VMX_VMFUNC:
1551 if (WARN_ON(!shadow_vmcs))
1576 const int max_fields[] = {
1586 if (WARN_ON(!shadow_vmcs))
1591 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1592 for (i = 0; i < max_fields[q]; i++) {
1593 field = fields[q][i];
1606 #ifdef CONFIG_KVM_HYPERV
1609 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
1615 if (unlikely(!(hv_clean_fields &
1616 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
1617 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
1618 hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
1619 hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
1622 if (unlikely(!(hv_clean_fields &
1623 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1627 evmcs->guest_interruptibility_info;
1634 if (unlikely(!(hv_clean_fields &
1635 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1637 evmcs->cpu_based_vm_exec_control;
1640 if (unlikely(!(hv_clean_fields &
1641 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1645 if (unlikely(!(hv_clean_fields &
1646 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1650 if (unlikely(!(hv_clean_fields &
1651 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1653 evmcs->vm_entry_intr_info_field;
1655 evmcs->vm_entry_exception_error_code;
1657 evmcs->vm_entry_instruction_len;
1660 if (unlikely(!(hv_clean_fields &
1661 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1687 if (unlikely(!(hv_clean_fields &
1688 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1690 evmcs->pin_based_vm_exec_control;
1693 evmcs->secondary_vm_exec_control;
1696 if (unlikely(!(hv_clean_fields &
1697 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1702 if (unlikely(!(hv_clean_fields &
1703 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1707 if (unlikely(!(hv_clean_fields &
1708 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1747 if (unlikely(!(hv_clean_fields &
1748 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1756 if (unlikely(!(hv_clean_fields &
1757 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1768 if (unlikely(!(hv_clean_fields &
1769 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1778 if (unlikely(!(hv_clean_fields &
1779 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1784 if (unlikely(!(hv_clean_fields &
1785 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1795 evmcs->guest_pending_dbg_exceptions;
1848 KVM_BUG_ON(1, vmx->vcpu.kvm);
1854 #ifdef CONFIG_KVM_HYPERV
1981 evmcs->guest_pending_dbg_exceptions =
2011 evmcs->guest_interruptibility_info =
2016 evmcs->vm_entry_exception_error_code =
2026 KVM_BUG_ON(1, vmx->vcpu.kvm);
2035 struct kvm_vcpu *vcpu, bool from_launch)
2037 #ifdef CONFIG_KVM_HYPERV
2039 bool evmcs_gpa_changed = false;
2042 if (likely(!guest_cpuid_has_evmcs(vcpu)))
2051 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2057 &vmx->nested.hv_evmcs_map))
2090 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2092 evmcs_gpa_changed = true;
2111 if (from_launch || evmcs_gpa_changed) {
2112 vmx->nested.hv_evmcs->hv_clean_fields &=
2113 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2142 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2145 return HRTIMER_NORESTART;
2165 u64 preemption_timeout)
2173 if (preemption_timeout == 0) {
2178 if (vcpu->arch.virtual_tsc_khz == 0)
2182 preemption_timeout *= 1000000;
2183 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2185 ktime_add_ns(ktime_get(), preemption_timeout),
2186 HRTIMER_MODE_ABS_PINNED);
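/*
 * The vmcs12 preemption-timer value counts in units of 2^5 TSC cycles (see
 * VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE); after shifting it into TSC
 * cycles it is converted to nanoseconds as cycles * 10^6 / tsc_khz and armed
 * as an absolute, CPU-pinned hrtimer.
 */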
2195 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2197 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2202 struct kvm *kvm = vmx->vcpu.kvm;
2228 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2288 exec_control = __pin_controls_get(vmcs01);
2290 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2297 exec_control &= ~PIN_BASED_POSTED_INTR;
2298 pin_controls_set(vmx, exec_control);
2303 exec_control = __exec_controls_get(vmcs01);
2304 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2305 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2306 exec_control &= ~CPU_BASED_TPR_SHADOW;
2310 if (exec_control & CPU_BASED_TPR_SHADOW)
2312 #ifdef CONFIG_X86_64
2314 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2315 CPU_BASED_CR8_STORE_EXITING;
2322 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2323 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2331 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2332 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2334 exec_controls_set(vmx, exec_control);
2340 exec_control = __secondary_exec_controls_get(vmcs01);
2343 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2344 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2345 SECONDARY_EXEC_ENABLE_INVPCID |
2346 SECONDARY_EXEC_ENABLE_RDTSCP |
2347 SECONDARY_EXEC_ENABLE_XSAVES |
2348 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2349 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2350 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2351 SECONDARY_EXEC_ENABLE_VMFUNC |
2352 SECONDARY_EXEC_DESC);
2355 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2359 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
2362 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2369 exec_control |= SECONDARY_EXEC_DESC;
2371 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2376 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2378 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2381 secondary_exec_controls_set(vmx, exec_control);
2396 exec_control = __vm_entry_controls_get(vmcs01);
2398 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
2399 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2401 if (guest_efer & EFER_LMA)
2402 exec_control |= VM_ENTRY_IA32E_MODE;
2404 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2406 vm_entry_controls_set(vmx, exec_control);
2415 exec_control = __vm_exit_controls_get(vmcs01);
2417 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2419 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2420 vm_exit_controls_set(vmx, exec_control);
2445 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2446 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2487 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2488 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2572 enum vm_entry_failure_code *entry_failure_code)
2576 bool load_guest_pdptrs_vmcs12 = false;
2583 !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2605 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2616 vcpu->arch.l1_tsc_offset,
2621 vcpu->arch.l1_tsc_scaling_ratio,
2626 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2659 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2665 from_vmentry, entry_failure_code))
2689 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2691 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2705 evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2728 switch (new_eptp & VMX_EPTP_MT_MASK) {
2729 case VMX_EPTP_MT_UC:
2733 case VMX_EPTP_MT_WB:
2742 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2743 case VMX_EPTP_PWL_5:
2747 case VMX_EPTP_PWL_4:
2760 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2865 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2866 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2867 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2868 bool should_have_error_code;
2870 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2874 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2875 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2880 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2881 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2882 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2886 should_have_error_code =
2887 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2889 if (CC(has_error_code != should_have_error_code))
2893 if (CC(has_error_code &&
2898 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2902 switch (intr_type) {
2903 case INTR_TYPE_SOFT_EXCEPTION:
2904 case INTR_TYPE_SOFT_INTR:
2905 case INTR_TYPE_PRIV_SW_EXCEPTION:
2927 #ifdef CONFIG_KVM_HYPERV
2928 if (guest_cpuid_has_evmcs(vcpu))
2938 #ifdef CONFIG_X86_64
2940 !!(vcpu->arch.efer & EFER_LMA)))
3034 offsetof(struct vmcs12, hdr),
3060 enum vm_entry_failure_code *entry_failure_code)
3064 *entry_failure_code = ENTRY_FAIL_DEFAULT;
3079 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3127 unsigned long cr3, cr4;
3150 cr3 = __get_current_cr3_fast();
3156 cr4 = cr4_read_shadow();
3175 trace_kvm_nested_vmenter_failed(
3176 "early hardware check VM-instruction error: ", error);
3177 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3184 if (hw_breakpoint_active())
3185 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3197 VMX_EXIT_REASONS_FAILED_VMENTRY));
3202 #ifdef CONFIG_KVM_HYPERV
3203 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
3212 if (guest_cpuid_has_evmcs(vcpu) &&
3236 struct kvm_host_map *map;
3238 if (!vcpu->arch.pdptrs_from_userspace &&
3256 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n",
3258 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3259 vcpu->run->internal.suberror =
3260 KVM_INTERNAL_ERROR_EMULATION;
3261 vcpu->run->internal.ndata = 0;
3270 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3282 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3297 (struct pi_desc *)(((void *)map->hva) +
3309 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3313 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3315 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3322 #ifdef CONFIG_KVM_HYPERV
3329 if (!nested_get_evmcs_page(vcpu)) {
3330 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3332 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3333 vcpu->run->internal.suberror =
3334 KVM_INTERNAL_ERROR_EMULATION;
3335 vcpu->run->internal.ndata = 0;
3376 offset_in_page(dst), sizeof(gpa)))
3399 kvm_inject_gp(vcpu, 0);
3411 return ((rvi & 0xf0) > (vppr & 0xf0));
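/*
 * An interrupt is deliverable to the guest only if its priority class (the
 * high nibble of RVI) is strictly greater than the processor priority class
 * in VPPR, mirroring the virtual-interrupt-delivery rules in the SDM.
 */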
3432 enum vm_entry_failure_code entry_failure_code;
3433 bool evaluate_pending_interrupts;
3435 .basic = EXIT_REASON_INVALID_STATE,
3436 .failed_vmentry = 1,
3452 evaluate_pending_interrupts = exec_controls_get(vmx) &
3453 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3456 if (!evaluate_pending_interrupts)
3502 &entry_failure_code)) {
3503 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3505 goto vmentry_fail_vmexit;
3512 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3514 goto vmentry_fail_vmexit_guest_mode;
3522 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3524 goto vmentry_fail_vmexit_guest_mode;
3534 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3543 if (unlikely(evaluate_pending_interrupts))
3544 kvm_make_request(KVM_REQ_EVENT, vcpu);
3570 vmentry_fail_vmexit_guest_mode:
3575 vmentry_fail_vmexit:
3649 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3654 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3655 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3674 goto vmentry_failed;
3680 kvm_make_request(KVM_REQ_EVENT, vcpu);
3685 vmx->vcpu.arch.l1tf_flush_l1d = true;
3700 case GUEST_ACTIVITY_HLT:
3714 case GUEST_ACTIVITY_WAIT_SIPI:
3716 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3751 static inline unsigned long
3755 (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3758 vcpu->arch.cr0_guest_owned_bits));
3761 static inline unsigned long
3765 (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3768 vcpu->arch.cr4_guest_owned_bits));
3773 u32 vm_exit_reason, u32 exit_intr_info)
3795 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3796 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3799 } else if (vcpu->arch.exception.injected) {
3800 nr = vcpu->arch.exception.vector;
3801 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3805 vcpu->arch.event_exit_inst_len;
3806 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3808 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3810 if (vcpu->arch.exception.has_error_code) {
3811 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3813 vcpu->arch.exception.error_code;
3817 } else if (vcpu->arch.nmi_injected) {
3819 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3820 } else if (vcpu->arch.interrupt.injected) {
3821 nr = vcpu->arch.interrupt.nr;
3822 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3824 if (vcpu->arch.interrupt.soft) {
3825 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3827 vcpu->arch.event_exit_inst_len;
3829 idt_vectoring |= INTR_TYPE_EXT_INTR;
3878 if (max_irr != 256) {
3884 vapic_page, &max_irr);
3886 if ((u8)max_irr > ((u8)status & 0xff)) {
3888 status |= (u8)max_irr;
3903 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
3904 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
3906 unsigned long exit_qual;
3908 if (ex->has_payload) {
3909 exit_qual = ex->payload;
3910 } else if (ex->vector == PF_VECTOR) {
3911 exit_qual = vcpu->arch.cr2;
3912 } else if (ex->vector == DB_VECTOR) {
3913 exit_qual = vcpu->arch.dr6;
3914 exit_qual &= ~DR6_BT;
3915 exit_qual ^= DR6_ACTIVE_LOW;
3936 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3940 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3942 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3946 intr_info |= INTR_INFO_UNBLOCK_NMI;
3969 if (!ex->pending || ex->vector != DB_VECTOR)
3973 return ex->payload & ~DR6_BD;
3996 unsigned long pending_dbg;
4000 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg);
4114 bool block_nested_events = block_nested_exceptions ||
4119 if (block_nested_events)
4123 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
4133 if (block_nested_events)
4137 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
4155 if (vcpu->arch.exception_vmexit.pending &&
4157 if (block_nested_exceptions)
4164 if (vcpu->arch.exception.pending &&
4166 if (block_nested_exceptions)
4172 if (block_nested_events)
4179 if (vcpu->arch.exception_vmexit.pending) {
4180 if (block_nested_exceptions)
4187 if (vcpu->arch.exception.pending) {
4188 if (block_nested_exceptions)
4194 if (block_nested_events)
4201 if (block_nested_events)
4207 if (block_nested_events)
4213 NMI_VECTOR | INTR_TYPE_NMI_INTR |
4214 INTR_INFO_VALID_MASK, 0);
4219 vcpu->arch.nmi_pending = 0;
4225 if (block_nested_events)
4243 if (ktime_to_ns(remaining) <= 0)
4246 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4247 do_div(value, 1000000);
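/*
 * Inverse of the arming path: the time remaining on the hrtimer is converted
 * back from nanoseconds to TSC cycles (ns * tsc_khz / 10^6) before being
 * scaled down to vmcs12 preemption-timer units for the guest to read.
 */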
4254 case GUEST_ES_SELECTOR:
4255 case GUEST_CS_SELECTOR:
4256 case GUEST_SS_SELECTOR:
4257 case GUEST_DS_SELECTOR:
4258 case GUEST_FS_SELECTOR:
4259 case GUEST_GS_SELECTOR:
4260 case GUEST_LDTR_SELECTOR:
4261 case GUEST_TR_SELECTOR:
4262 case GUEST_ES_LIMIT:
4263 case GUEST_CS_LIMIT:
4264 case GUEST_SS_LIMIT:
4265 case GUEST_DS_LIMIT:
4266 case GUEST_FS_LIMIT:
4267 case GUEST_GS_LIMIT:
4268 case GUEST_LDTR_LIMIT:
4269 case GUEST_TR_LIMIT:
4270 case GUEST_GDTR_LIMIT:
4271 case GUEST_IDTR_LIMIT:
4272 case GUEST_ES_AR_BYTES:
4273 case GUEST_DS_AR_BYTES:
4274 case GUEST_FS_AR_BYTES:
4275 case GUEST_GS_AR_BYTES:
4276 case GUEST_LDTR_AR_BYTES:
4277 case GUEST_TR_AR_BYTES:
4284 case GUEST_LDTR_BASE:
4286 case GUEST_GDTR_BASE:
4287 case GUEST_IDTR_BASE:
4288 case GUEST_PENDING_DBG_EXCEPTIONS:
4395 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4397 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4433 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4496 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4512 enum vm_entry_failure_code ignored;
4513 struct kvm_segment seg;
4518 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4520 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4539 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4571 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4576 seg = (struct kvm_segment) {
4578 .limit = 0xFFFFFFFF,
4590 seg = (struct kvm_segment) {
4592 .limit = 0xFFFFFFFF,
4611 seg = (struct kvm_segment) {
4620 memset(&seg, 0, sizeof(seg));
4639 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4652 return efer_msr->data;
4661 struct vmx_msr_entry g, h;
4674 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4689 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4721 pr_debug_ratelimited(
4722 "%s read MSR index failed (%u, 0x%08llx)\n",
4730 pr_debug_ratelimited(
4731 "%s read MSR failed (%u, 0x%08llx)\n",
4735 if (h.index != g.index)
4737 if (h.value == g.value)
4741 pr_debug_ratelimited(
4742 "%s check failed (%u, 0x%x, 0x%x)\n",
4743 __func__, j, h.index, h.reserved);
4748 pr_debug_ratelimited(
4749 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4750 __func__, j, h.index, h.value);
4768 u32 exit_intr_info, unsigned long exit_qualification)
4779 #ifdef CONFIG_KVM_HYPERV
4780 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4787 (void)nested_get_evmcs_page(vcpu);
4805 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4808 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4810 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4813 if (likely(!vmx->fail)) {
4838 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4849 vcpu->arch.nmi_injected = false;
4864 indirect_branch_prediction_barrier();
4871 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4894 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4899 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4907 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4909 if (likely(!vmx->fail)) {
4915 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4953 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4968 struct kvm_segment s;
4978 int scaling = vmx_instruction_info & 3;
4979 int addr_size = (vmx_instruction_info >> 7) & 7;
4980 bool is_reg = vmx_instruction_info & (1u << 10);
4981 int seg_reg = (vmx_instruction_info >> 15) & 7;
4982 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4983 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4984 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4985 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4994 off = exit_qualification;
4996 off = (gva_t)sign_extend64(off, 31);
4997 else if (addr_size == 0)
4998 off = (gva_t)sign_extend64(off, 15);
5013 else if (addr_size == 0)
5024 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
5025 *ret = s.base + off;
5041 *ret = (s.base + off) & 0xffffffff;
5053 exn = ((s.type & 0xa) == 0 || (s.type & 8));
5058 exn = ((s.type & 0xa) == 8);
5065 exn = (s.unusable != 0);
5073 if (!(s.base == 0 && s.limit == 0xffffffff &&
5074 ((s.type & 8) || !(s.type & 4))))
5075 exn = exn || ((u64)off + len - 1 > s.limit);
5079 seg_reg == VCPU_SREG_SS ?
5080 SS_VECTOR : GP_VECTOR,
5097 sizeof(*vmpointer), &gva)) {
5150 goto out_cached_vmcs12;
5155 goto out_cached_shadow_vmcs12;
5158 goto out_shadow_vmcs;
5161 HRTIMER_MODE_ABS_PINNED);
5179 out_cached_shadow_vmcs12:
5196 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
5197 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
5231 kvm_inject_gp(vcpu, 0);
5245 kvm_inject_gp(vcpu, 0);
5250 != VMXON_NEEDED_FEATURES) {
5251 kvm_inject_gp(vcpu, 0);
5317 kvm_make_request(KVM_REQ_EVENT, vcpu);
5356 vmptr + offsetof(struct vmcs12,
5358 &zero, sizeof(zero));
5382 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5385 unsigned long field;
5404 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5443 if (instr_info & BIT(10)) {
5448 instr_info, true, len, &gva))
5462 #define SHADOW_FIELD_RW(x, y) case x:
5474 #define SHADOW_FIELD_RO(x, y) case x:
5488 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5491 unsigned long field;
5514 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5517 if (instr_info & BIT(10))
5522 instr_info, false, len, &gva))
5541 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5558 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5594 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5638 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5642 offsetof(struct vmcs12, hdr),
5645 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5652 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5664 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5677 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5690 true, sizeof(gpa_t), &gva))
5705 u32 vmx_instruction_info, types;
5706 unsigned long type, roots_to_free;
5707 struct kvm_mmu *mmu;
5713 int i, r, gpr_index;
5716 SECONDARY_EXEC_ENABLE_EPT) ||
5725 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5731 if (type >= 32 || !(types & (1 << type)))
5732 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5738 vmx_instruction_info, false, sizeof(operand), &gva))
5748 mmu = &vcpu->arch.guest_mmu;
5751 case VMX_EPT_EXTENT_CONTEXT:
5754 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5759 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5761 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5763 mmu->prev_roots[i].pgd,
5765 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5768 case VMX_EPT_EXTENT_GLOBAL:
5769 roots_to_free = KVM_MMU_ROOTS_ALL;
5785 u32 vmx_instruction_info;
5786 unsigned long type, types;
5797 SECONDARY_EXEC_ENABLE_VPID) ||
5806 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5813 if (type >= 32 || !(types & (1 << type)))
5815 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5821 vmx_instruction_info, false, sizeof(operand), &gva))
5829 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5833 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5841 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5844 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5845 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5848 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5851 case VMX_VPID_EXTENT_ALL_CONTEXT:
5878 u32 index = kvm_rcx_read(vcpu);
5883 if (index >= VMFUNC_EPTP_ENTRIES)
5887 &new_eptp, index * 8, 8))
5902 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
5912 u32 function = kvm_rax_read(vcpu);
5967 gpa_t bitmap, last_bitmap;
5970 last_bitmap = INVALID_GPA;
5976 else if (port < 0x10000)
5980 bitmap += (port & 0x7fff) / 8;
5982 if (last_bitmap != bitmap)
5985 if (b & (1 << (port & 7)))
5990 last_bitmap = bitmap;
6000 unsigned short port;
6024 u32 msr_index = kvm_rcx_read(vcpu);
6036 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6038 if (msr_index >= 0xc0000000) {
6039 msr_index -= 0xc0000000;
6044 if (msr_index < 1024*8) {
6048 return 1 & (b >> (msr_index & 7));
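/*
 * The bitmap lookup above mirrors the hardware layout: one bit per MSR, low
 * MSRs (0x0-0x1fff) in the first 1KB of each half and high MSRs
 * (0xc0000000-0xc0001fff) in the second, with the read or write half
 * selected by the exit reason.
 */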
6100 CPU_BASED_CR3_STORE_EXITING)
6105 CPU_BASED_CR8_STORE_EXITING)
6137 encls_leaf = kvm_rax_read(vcpu);
6138 if (encls_leaf > 62)
6147 unsigned long field;
6164 return 1 & (b >> (field & 7));
6180 return entry_intr_info == (INTR_INFO_VALID_MASK
6181 | INTR_TYPE_OTHER_EVENT);
6193 switch ((u16)exit_reason.basic) {
6194 case EXIT_REASON_EXCEPTION_NMI:
6199 return vcpu->arch.apf.host_apf_flags ||
6203 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
6206 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
6212 case EXIT_REASON_EXTERNAL_INTERRUPT:
6214 case EXIT_REASON_MCE_DURING_VMENTRY:
6216 case EXIT_REASON_EPT_VIOLATION:
6224 case EXIT_REASON_EPT_MISCONFIG:
6232 case EXIT_REASON_PREEMPTION_TIMER:
6234 case EXIT_REASON_PML_FULL:
6240 case EXIT_REASON_VMFUNC:
6243 case EXIT_REASON_BUS_LOCK:
6249 #ifdef CONFIG_KVM_HYPERV
6250 case EXIT_REASON_VMCALL:
6272 switch ((u16)exit_reason.basic) {
6273 case EXIT_REASON_EXCEPTION_NMI:
6280 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6281 case EXIT_REASON_EXTERNAL_INTERRUPT:
6283 case EXIT_REASON_TRIPLE_FAULT:
6285 case EXIT_REASON_INTERRUPT_WINDOW:
6287 case EXIT_REASON_NMI_WINDOW:
6289 case EXIT_REASON_TASK_SWITCH:
6291 case EXIT_REASON_CPUID:
6293 case EXIT_REASON_HLT:
6295 case EXIT_REASON_INVD:
6297 case EXIT_REASON_INVLPG:
6299 case EXIT_REASON_RDPMC:
6301 case EXIT_REASON_RDRAND:
6303 case EXIT_REASON_RDSEED:
6305 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6307 case EXIT_REASON_VMREAD:
6310 case EXIT_REASON_VMWRITE:
6313 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6314 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6315 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6316 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6317 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6323 case EXIT_REASON_CR_ACCESS:
6325 case EXIT_REASON_DR_ACCESS:
6327 case EXIT_REASON_IO_INSTRUCTION:
6329 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6331 case EXIT_REASON_MSR_READ:
6332 case EXIT_REASON_MSR_WRITE:
6334 case EXIT_REASON_INVALID_STATE:
6336 case EXIT_REASON_MWAIT_INSTRUCTION:
6338 case EXIT_REASON_MONITOR_TRAP_FLAG:
6340 case EXIT_REASON_MONITOR_INSTRUCTION:
6342 case EXIT_REASON_PAUSE_INSTRUCTION:
6345 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6346 case EXIT_REASON_MCE_DURING_VMENTRY:
6348 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6350 case EXIT_REASON_APIC_ACCESS:
6351 case EXIT_REASON_APIC_WRITE:
6352 case EXIT_REASON_EOI_INDUCED:
6359 case EXIT_REASON_INVPCID:
6363 case EXIT_REASON_WBINVD:
6365 case EXIT_REASON_XSETBV:
6367 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6375 case EXIT_REASON_UMWAIT:
6376 case EXIT_REASON_TPAUSE:
6378 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6379 case EXIT_REASON_ENCLS:
6381 case EXIT_REASON_NOTIFY:
6397 unsigned long exit_qual;
6406 if (unlikely(vmx->fail)) {
6407 trace_kvm_nested_vmenter_failed(
6408 "hardware VM-instruction error: ",
6412 goto reflect_vmexit;
6446 struct kvm_nested_state __user *user_kvm_nested_state,
6451 struct kvm_nested_state kvm_state = {
6453 .format = KVM_STATE_NESTED_FORMAT_VMX,
6454 .size = sizeof(kvm_state),
6456 .hdr.vmx.vmxon_pa = INVALID_GPA,
6457 .hdr.vmx.vmcs12_pa = INVALID_GPA,
6458 .hdr.vmx.preemption_timer_deadline = 0,
6460 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6461 &user_kvm_nested_state->data.vmx[0];
6464 return kvm_state.size + sizeof(*user_vmx_nested_state);
6475 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6479 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6484 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6488 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6491 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6494 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6497 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6500 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6504 kvm_state.hdr.vmx.flags |=
6505 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6506 kvm_state.hdr.vmx.preemption_timer_deadline =
6512 if (user_data_size < kvm_state.size)
6515 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6548 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6549 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6560 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6565 return kvm_state.size;
6578 struct kvm_nested_state __user *user_kvm_nested_state,
6579 struct kvm_nested_state *kvm_state)
6583 enum vm_entry_failure_code ignored;
6584 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6585 &user_kvm_nested_state->data.vmx[0];
6588 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6591 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6592 if (kvm_state->hdr.vmx.smm.flags)
6595 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
6607 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6617 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6618 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6621 if (kvm_state->hdr.vmx.smm.flags &
6622 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6625 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6635 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6636 : kvm_state->hdr.vmx.smm.flags)
6639 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6640 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6643 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6650 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
6659 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6661 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6662 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
6663 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
6669 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6670 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6675 #ifdef CONFIG_KVM_HYPERV
6676 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6684 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
6690 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6694 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6699 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6705 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6709 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6712 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6719 if (kvm_state->size <
6720 sizeof(*kvm_state) +
6721 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6722 goto error_guest_mode;
6724 if (copy_from_user(shadow_vmcs12,
6725 user_vmx_nested_state->shadow_vmcs12,
6726 sizeof(*shadow_vmcs12))) {
6728 goto error_guest_mode;
6733 goto error_guest_mode;
6737 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6740 kvm_state->hdr.vmx.preemption_timer_deadline;
6746 goto error_guest_mode;
6752 goto error_guest_mode;
6755 kvm_make_request(KVM_REQ_EVENT, vcpu);
6776 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
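/*
 * The shortened field indices used for the vmcs12 offset table are the
 * architectural field encodings rotated by 6 bits; this macro rotates the
 * 16-bit value back (left by 10, i.e. right by 6) to recover the encoding.
 */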
6784 unsigned int max_idx, idx;
6810 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6814 PIN_BASED_EXT_INTR_MASK |
6815 PIN_BASED_NMI_EXITING |
6816 PIN_BASED_VIRTUAL_NMIS |
6819 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6820 PIN_BASED_VMX_PREEMPTION_TIMER;
6827 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6831 #ifdef CONFIG_X86_64
6832 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6834 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6835 VM_EXIT_CLEAR_BNDCFGS;
6837 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6838 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6839 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
6840 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
6850 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6854 #ifdef CONFIG_X86_64
6855 VM_ENTRY_IA32E_MODE |
6857 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
6859 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
6860 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
6870 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6874 CPU_BASED_INTR_WINDOW_EXITING |
6875 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6876 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6877 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6878 CPU_BASED_CR3_STORE_EXITING |
6879 #ifdef CONFIG_X86_64
6880 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6882 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6883 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6884 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6885 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6886 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6894 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6895 CPU_BASED_USE_MSR_BITMAPS;
6899 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6910 SECONDARY_EXEC_DESC |
6911 SECONDARY_EXEC_ENABLE_RDTSCP |
6912 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6913 SECONDARY_EXEC_WBINVD_EXITING |
6914 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6915 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6916 SECONDARY_EXEC_RDRAND_EXITING |
6917 SECONDARY_EXEC_ENABLE_INVPCID |
6918 SECONDARY_EXEC_ENABLE_VMFUNC |
6919 SECONDARY_EXEC_RDSEED_EXITING |
6920 SECONDARY_EXEC_ENABLE_XSAVES |
6921 SECONDARY_EXEC_TSC_SCALING |
6922 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
6929 SECONDARY_EXEC_SHADOW_VMCS;
6934 SECONDARY_EXEC_ENABLE_EPT;
6936 VMX_EPT_PAGE_WALK_4_BIT |
6937 VMX_EPT_PAGE_WALK_5_BIT |
6939 VMX_EPT_INVEPT_BIT |
6940 VMX_EPT_EXECUTE_ONLY_BIT;
6943 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6944 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6945 VMX_EPT_1GB_PAGE_BIT;
6948 SECONDARY_EXEC_ENABLE_PML;
6968 SECONDARY_EXEC_ENABLE_VPID;
6969 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6975 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6979 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6988 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
6990 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6992 VMX_MISC_ACTIVITY_HLT |
6993 VMX_MISC_ACTIVITY_WAIT_SIPI;
7007 VMX_BASIC_TRUE_CTLS |
7009 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
7012 msrs->basic |= VMX_BASIC_INOUT;
7022 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
7023 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
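/*
 * VMXON may only be executed with CR0.PE, CR0.PG and CR0.NE set and with
 * CR4.VMXE set; these masks are the bits KVM checks (in addition to the
 * fixed-0/fixed-1 MSRs) when emulating VMXON for L1.
 */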
7028 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
7029 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
7105 __get_free_page(GFP_KERNEL);
7141 #ifdef CONFIG_KVM_HYPERV
static bool cpu_has_vmx_vmfunc(void)
static bool vmx_pt_mode_is_host_guest(void)
static bool cpu_has_vmx_shadow_vmcs(void)
bool __read_mostly enable_ept_ad_bits
static bool cpu_has_vmx_apicv(void)
static bool cpu_has_load_ia32_efer(void)
static bool cpu_has_secondary_exec_ctrls(void)
static bool vmx_umip_emulated(void)
bool __read_mostly enable_pml
static bool cpu_has_vmx_basic_inout(void)
static bool cpu_has_vmx_pml(void)
static bool cpu_has_vmx_encls_vmexit(void)
static bool cpu_has_vmx_msr_bitmap(void)
bool __read_mostly enable_ept
bool __read_mostly enable_vpid
static bool cpu_has_vmx_preemption_timer(void)
bool __read_mostly enable_unrestricted_guest
bool __read_mostly flexpriority_enabled
static bool cpu_has_vmx_posted_intr(void)
static int ept_caps_to_lpage_level(u32 ept_caps)
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment)
static bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
static bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
static void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled)
static __always_inline int evmcs_field_offset(unsigned long field, u16 *clean_field)
#define KVM_EVMCS_VERSION
static u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs, unsigned long field, u16 offset)
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
static void kvm_register_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg)
static void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
static unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static void enter_guest_mode(struct kvm_vcpu *vcpu)
static void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
static ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static void leave_guest_mode(struct kvm_vcpu *vcpu)
static u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
static bool is_guest_mode(struct kvm_vcpu *vcpu)
#define X86EMUL_IO_NEEDED
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len)
void vcpu_put(struct kvm_vcpu *vcpu)
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len)
void vcpu_load(struct kvm_vcpu *vcpu)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len)
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned int offset, unsigned long len)
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len)
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
static bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
static bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
static u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots)
void kvm_init_mmu(struct kvm_vcpu *vcpu)
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp)
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
static bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
static bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
static bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
static bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
static bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
static bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
static bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
static unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
static int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
static unsigned long nested_read_cr4(struct vmcs12 *fields)
static struct vmcs12 * get_vmcs12(struct kvm_vcpu *vcpu)
#define nested_guest_cr4_valid
static struct vmcs12 * get_shadow_vmcs12(struct kvm_vcpu *vcpu)
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
static unsigned long nested_read_cr0(struct vmcs12 *fields)
static bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
static bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
static bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
static bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
static unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
static int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
static bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
static bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
static bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
static int nested_cpu_has_ept(struct vmcs12 *vmcs12)
static bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
@ NVMX_VMENTRY_KVM_INTERNAL_ERROR
static bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
#define nested_host_cr4_valid
static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
static bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
#define vcpu_to_pmu(vcpu)
static bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
bool __read_mostly enable_sgx
void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static bool is_smm(struct kvm_vcpu *vcpu)
unsigned long pending_events
struct vmcs_host_state host_state
unsigned long * msr_bitmap
struct vmcs * shadow_vmcs
struct hrtimer preemption_timer
struct vmcs12 * cached_shadow_vmcs12
struct kvm_host_map virtual_apic_map
struct nested_vmx_msrs msrs
bool force_msr_bitmap_recalc
struct gfn_to_hva_cache vmcs12_cache
bool reload_vmcs01_apic_access_page
struct kvm_host_map msr_bitmap_map
bool has_preemption_timer_deadline
u64 preemption_timer_deadline
bool preemption_timer_expired
struct kvm_host_map pi_desc_map
struct gfn_to_hva_cache shadow_vmcs12_cache
bool need_vmcs12_to_shadow_sync
bool enlightened_vmcs_enabled
struct vmcs12 * cached_vmcs12
bool update_vmcs01_apicv_status
struct kvm_host_map apic_access_page_map
struct nested_vmx::@39 smm
bool update_vmcs01_cpu_dirty_logging
bool change_vmcs01_virtual_apic_mode
struct loaded_vmcs vmcs02
bool need_sync_vmcs02_to_vmcs12_rare
struct vcpu_vmx::msr_autostore msr_autostore
struct vcpu_vmx::@41 segment_cache
struct loaded_vmcs vmcs01
struct loaded_vmcs * loaded_vmcs
struct vcpu_vmx::msr_autoload msr_autoload
u64 msr_ia32_feature_control
unsigned long exit_qualification
union vmx_exit_reason exit_reason
u32 page_fault_error_code_mask
u64 posted_intr_desc_addr
natural_width guest_sysenter_eip
natural_width guest_ldtr_base
natural_width guest_es_base
u32 vm_entry_msr_load_count
natural_width guest_tr_base
natural_width guest_gs_base
u32 vm_entry_exception_error_code
natural_width cr0_read_shadow
u32 page_fault_error_code_match
natural_width host_ia32_sysenter_eip
u32 cpu_based_vm_exec_control
natural_width host_fs_base
natural_width guest_cs_base
u32 vm_exit_instruction_len
natural_width host_idtr_base
natural_width host_gdtr_base
u32 secondary_vm_exec_control
natural_width guest_ss_base
natural_width guest_linear_address
natural_width host_ia32_sysenter_esp
u32 vm_entry_intr_info_field
u64 vm_exit_msr_load_addr
u32 idt_vectoring_info_field
u32 vmx_preemption_timer_value
u32 vm_entry_instruction_len
natural_width guest_idtr_base
u64 guest_physical_address
u32 vm_exit_msr_store_count
natural_width host_tr_base
natural_width guest_sysenter_esp
u64 vm_exit_msr_store_addr
u32 idt_vectoring_error_code
natural_width exit_qualification
natural_width guest_gdtr_base
u64 virtual_apic_page_addr
u32 vm_exit_intr_error_code
natural_width host_gs_base
u32 vm_exit_msr_load_count
natural_width guest_ds_base
u64 guest_ia32_perf_global_ctrl
natural_width guest_rflags
natural_width cr0_guest_host_mask
natural_width guest_fs_base
u32 guest_interruptibility_info
u64 vm_entry_msr_load_addr
natural_width cr4_guest_host_mask
u32 host_ia32_sysenter_cs
natural_width guest_pending_dbg_exceptions
u32 pin_based_vm_exec_control
u64 host_ia32_perf_global_ctrl
natural_width cr4_read_shadow
u32 cpu_based_2nd_exec_ctrl
struct nested_vmx_msrs nested
struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS]
static bool nested_exit_on_intr(struct vcpu_svm *svm)
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
const unsigned int nr_vmcs12_fields
const unsigned short vmcs12_field_offsets[]
static short get_vmcs12_field_offset(unsigned long field)
static void vmcs12_write_any(struct vmcs12 *vmcs12, unsigned long field, u16 offset, u64 field_value)
static u64 vmcs12_read_any(struct vmcs12 *vmcs12, unsigned long field, u16 offset)
static __always_inline bool is_nmi(u32 intr_info)
static bool is_page_fault(u32 intr_info)
#define VMCS_FIELD_INDEX_SHIFT
static bool is_alignment_check(u32 intr_info)
static int vmcs_field_readonly(unsigned long field)
static bool is_breakpoint(u32 intr_info)
static bool is_debug(u32 intr_info)
static bool is_exception_with_error_code(u32 intr_info)
static unsigned int vmcs_field_index(unsigned long field)
static bool is_double_fault(u32 intr_info)
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version)
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
static bool evmptr_is_valid(u64 evmptr)
#define EVMPTR_MAP_PENDING
static bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
static bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
static struct hv_enlightened_vmcs * nested_vmx_evmcs(struct vcpu_vmx *vmx)
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
static int handle_vmwrite(struct kvm_vcpu *vcpu)
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int handle_vmptrst(struct kvm_vcpu *vcpu)
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int max_shadow_read_write_fields
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx, unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int types)
static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int handle_vmptrld(struct kvm_vcpu *vcpu)
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, u32 msr_index)
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int handle_vmresume(struct kvm_vcpu *vcpu)
#define VMXON_CR0_ALWAYSON
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, enum vm_entry_failure_code *entry_failure_code)
static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
static bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool is_vmenter)
static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex)
static int handle_vmread(struct kvm_vcpu *vcpu)
static bool vmx_control_verify(u32 control, u32 low, u32 high)
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, struct vmx_msr_entry *e)
static bool is_shadow_field_ro(unsigned long field)
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, bool from_launch)
static int handle_vmxoff(struct kvm_vcpu *vcpu)
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
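/*
 * nested_vmx_succeed(), nested_vmx_failInvalid() and nested_vmx_failValid()
 * implement the architectural VMsucceed/VMfailInvalid/VMfailValid outcomes:
 * success clears all six status flags, VMfailInvalid sets only CF, and
 * VMfailValid sets only ZF while recording the error number in the current
 * VMCS's VM-instruction error field. nested_vmx_fail() falls back to
 * VMfailInvalid when no VMCS is current, since there is nowhere to store the
 * error number. A standalone sketch of the flag arithmetic; the helper names
 * below are illustrative.
 */
#include <stdint.h>

#define X86_EFLAGS_CF	(1ull << 0)
#define X86_EFLAGS_PF	(1ull << 2)
#define X86_EFLAGS_AF	(1ull << 4)
#define X86_EFLAGS_ZF	(1ull << 6)
#define X86_EFLAGS_SF	(1ull << 7)
#define X86_EFLAGS_OF	(1ull << 11)

#define VMX_STATUS_FLAGS (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			  X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/* VMsucceed: all six status flags cleared. */
static uint64_t rflags_vmsucceed(uint64_t rflags)
{
	return rflags & ~VMX_STATUS_FLAGS;
}

/* VMfailInvalid: CF set, the other status flags cleared. */
static uint64_t rflags_vmfail_invalid(uint64_t rflags)
{
	return (rflags & ~VMX_STATUS_FLAGS) | X86_EFLAGS_CF;
}

/* VMfailValid: ZF set, the others cleared (error number stored elsewhere). */
static uint64_t rflags_vmfail_valid(uint64_t rflags)
{
	return (rflags & ~VMX_STATUS_FLAGS) | X86_EFLAGS_ZF;
}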
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO)
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, struct vmcs12 *vmcs12)
static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, struct loaded_vmcs *prev)
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
static bool is_shadow_field_rw(unsigned long field)
static int handle_invept(struct kvm_vcpu *vcpu)
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
static void nested_release_vmcs12(struct kvm_vcpu *vcpu)
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code)
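/*
 * nested_vmx_is_page_fault_vmexit() applies the architectural page-fault
 * filtering rule: an L2 #PF is forwarded to L1 when the "error code matches
 * PFEC_MASK/PFEC_MATCH" predicate agrees with bit 14 of L1's exception
 * bitmap (match with the bit set, or no match with the bit clear). A
 * self-contained sketch of that rule; the parameter names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14

static bool l1_wants_page_fault_exit(uint32_t exception_bitmap,
				     uint32_t pfec_mask, uint32_t pfec_match,
				     uint16_t error_code)
{
	bool bit = exception_bitmap & (1u << PF_VECTOR);
	bool match = (error_code & pfec_mask) == pfec_match;

	return bit == match;
}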
void nested_vmx_hardware_unsetup(void)
static u64 vmx_control_msr(u32 low, u32 high)
static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
void vmx_leave_nested(struct kvm_vcpu *vcpu)
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code)
static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
struct kvm_x86_nested_ops vmx_nested_ops
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, int *ret)
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
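/*
 * The pin-based, CPU-based, entry and exit control capability MSRs report
 * their allowed-0 settings in the low 32 bits and allowed-1 settings in the
 * high 32 bits; vmx_control_msr() packs the two halves and
 * vmx_control_verify() checks a requested control word against them. When
 * userspace restores these MSRs, vmx_restore_control_msr() additionally has
 * to reject values that would advertise more than KVM's defaults, which is
 * the subset-under-mask test named by is_bitwise_subset() above. A standalone
 * sketch of all three, assuming the usual "low = must-be-1, high = may-be-1"
 * encoding; the exact kernel helper bodies may differ in detail.
 */
#include <stdbool.h>
#include <stdint.h>

/* Pack the allowed-0 (low) and allowed-1 (high) halves of a capability MSR. */
static uint64_t control_msr(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}

/* Every required (low) bit must be set and no bit outside allowed-1 (high). */
static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & high) | low) == control;
}

/* True iff every bit of 'subset' covered by 'mask' is also set in 'superset'. */
static bool bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}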
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
static unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
static int max_shadow_read_only_fields
static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
#define vmx_vmwrite_bitmap
static int handle_invvpid(struct kvm_vcpu *vcpu)
#define VMCS12_IDX_TO_ENC(idx)
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
static u64 nested_vmx_calc_vmcs_enum_msr(void)
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, int size)
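/*
 * nested_vmx_check_io_bitmaps() answers "would L1 intercept this L2 I/O
 * access?": with "use I/O bitmaps" enabled, VMX consults two 4-KiB bitmaps,
 * A for ports 0x0000-0x7fff and B for 0x8000-0xffff, one bit per port, and a
 * multi-byte access is intercepted if any covered port's bit is set. A
 * self-contained sketch over flat copies of the bitmaps (the kernel reads
 * them from L1's guest memory).
 */
#include <stdbool.h>
#include <stdint.h>

static bool io_access_intercepted(const uint8_t bitmap_a[4096],
				  const uint8_t bitmap_b[4096],
				  unsigned int port, int size)
{
	while (size > 0) {
		const uint8_t *bitmap;
		unsigned int idx;

		if (port >= 0x10000)		/* outside the 16-bit port space */
			return true;

		bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
		idx = port & 0x7fff;

		if (bitmap[idx / 8] & (1u << (idx % 8)))
			return true;

		port++;
		size--;
	}
	return false;
}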
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enum vm_entry_failure_code *entry_failure_code)
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, gpa_t addr)
static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex)
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
static void nested_release_evmcs(struct kvm_vcpu *vcpu)
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
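/*
 * nested_vmx_check_eptp() validates the EPT pointer L1 supplies against the
 * capabilities KVM exposes. Architecturally the EPTP encodes the memory type
 * in bits 2:0, the page-walk length minus one in bits 5:3, the accessed/dirty
 * enable in bit 6 and the top-level table's physical address in bits 51:12.
 * A sketch of decoding and sanity-checking those fields; which memory types,
 * walk lengths and the A/D bit are actually permitted depends on the EPT/VPID
 * capability MSR exposed to L1 and is only approximated here.
 */
#include <stdbool.h>
#include <stdint.h>

#define EPTP_MEMTYPE_MASK	0x7ull					/* bits 2:0 */
#define EPTP_WALK_LEN_SHIFT	3					/* bits 5:3 */
#define EPTP_AD_ENABLE		(1ull << 6)
#define EPTP_PA_MASK		(((1ull << 52) - 1) & ~0xfffull)	/* bits 51:12 */

#define EPT_MEMTYPE_UC	0
#define EPT_MEMTYPE_WB	6

static bool eptp_looks_valid(uint64_t eptp, int host_pa_bits)
{
	uint64_t memtype = eptp & EPTP_MEMTYPE_MASK;
	uint64_t levels = ((eptp >> EPTP_WALK_LEN_SHIFT) & 0x7) + 1;
	uint64_t pa = eptp & EPTP_PA_MASK;

	if (memtype != EPT_MEMTYPE_UC && memtype != EPT_MEMTYPE_WB)
		return false;
	if (levels != 4 && levels != 5)		/* 5-level only if advertised */
		return false;
	if (host_pa_bits < 64 && (pa >> host_pa_bits))
		return false;			/* must fit the physical address width */
	return true;
}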
void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data)
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int type)
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, union vmx_exit_reason exit_reason)
static bool is_vmcs12_ext_field(unsigned long field)
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
static bool __read_mostly enable_shadow_vmcs
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
static unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
module_param(nested_early_check, bool, S_IRUGO)
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, u32 count, u64 addr)
static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
static u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
#define VMX_VPID_EXTENT_SUPPORTED_MASK
static bool __read_mostly nested_early_check
static int vmx_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size)
static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector, u32 error_code)
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info)
static unsigned long *vmx_bitmap[VMX_BITMAP_NR]
#define vmx_vmread_bitmap
static bool vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
static int vmx_set_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, struct kvm_nested_state *kvm_state)
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
static int handle_vmxon(struct kvm_vcpu *vcpu)
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
static int handle_vmclear(struct kvm_vcpu *vcpu)
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
void nested_vmx_set_vmcs_shadowing_bitmap(void)
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void free_nested(struct kvm_vcpu *vcpu)
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
#define VMXON_CR4_ALWAYSON
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, u64 preemption_timeout)
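/*
 * The VMX-preemption timer counts down at the TSC rate divided by 2^rate,
 * where the rate comes from the low bits of IA32_VMX_MISC; KVM advertises
 * the fixed emulated rate VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (5), so
 * vmx_start_preemption_timer() has to scale the vmcs12 timer value into host
 * time before arming an hrtimer. A simplified sketch of that unit conversion,
 * assuming a known guest TSC frequency in kHz; rounding, overflow and the
 * L1 TSC scaling are ignored here.
 */
#include <stdint.h>

#define EMULATED_PREEMPTION_TIMER_RATE 5	/* one timer tick = TSC >> 5 */

static uint64_t preemption_timeout_ns(uint64_t timer_value, uint64_t tsc_khz)
{
	uint64_t tsc_cycles = timer_value << EMULATED_PREEMPTION_TIMER_RATE;

	/* cycles / (kHz * 1000) seconds == cycles * 10^6 / kHz nanoseconds */
	return tsc_cycles * 1000000ull / tsc_khz;
}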
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap)
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
static struct shadow_vmcs_field shadow_read_write_fields[]
static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
static int nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u32 **low, u32 **high)
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int handle_vmfunc(struct kvm_vcpu *vcpu)
static struct shadow_vmcs_field shadow_read_only_fields[]
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
static void nested_vmx_setup_secondary_ctls(u32 ept_caps, struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
static void init_vmcs_shadow_fields(void)
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
void ept_save_pdptrs(struct kvm_vcpu *vcpu)
void free_vmcs(struct vmcs *vmcs)
void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, unsigned long fs_base, unsigned long gs_base)
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, struct loaded_vmcs *buddy)
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
int vmx_get_cpl(struct kvm_vcpu *vcpu)
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
static bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, unsigned int flags)
#define VMX_REGS_LAZY_LOAD_SET
static struct vmcs *alloc_vmcs(bool shadow)
static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
static int vmx_get_instr_info_reg2(u32 vmx_instr_info)
static unsigned long vmx_l1_guest_owned_cr0_bits(void)
static bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
static u8 vmx_get_rvi(void)
#define MAX_NR_LOADSTORE_MSRS
static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
static void vpid_sync_vcpu_addr(int vpid, gva_t addr)
static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
static __always_inline u64 vmcs_read64(unsigned long field)
static __always_inline void vmcs_write16(unsigned long field, u16 value)
static __always_inline void vmcs_write32(unsigned long field, u32 value)
static void vmcs_load(struct vmcs *vmcs)
static __always_inline unsigned long __vmcs_readl(unsigned long field)
static void vpid_sync_context(int vpid)
static __always_inline u32 vmcs_read32(unsigned long field)
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
static __always_inline void vmcs_write64(unsigned long field, u64 value)
static void vmcs_clear(struct vmcs *vmcs)
static __always_inline unsigned long vmcs_readl(unsigned long field)
static __always_inline u16 vmcs_read16(unsigned long field)
bool __read_mostly enable_apicv
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
u64 __read_mostly host_efer
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e)
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
static bool is_protmode(struct kvm_vcpu *vcpu)
static bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
static bool x86_exception_has_error_code(unsigned int vector)
static bool kvm_exception_is_soft(unsigned int nr)
static bool kvm_mpx_supported(void)
static bool kvm_pat_valid(u64 data)
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
static bool kvm_dr7_valid(u64 data)
static void kvm_register_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
static unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
static bool is_pae_paging(struct kvm_vcpu *vcpu)
static bool is_long_mode(struct kvm_vcpu *vcpu)
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
static bool is_64_bit_mode(struct kvm_vcpu *vcpu)
static bool kvm_notify_vmexit_enabled(struct kvm *kvm)
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
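/*
 * is_noncanonical_address() guards linear addresses used by VMX instruction
 * operands: in 64-bit mode an address is canonical only when bits 63 down to
 * the implemented virtual-address width are a sign extension of the top
 * implemented bit. A standalone sketch for a given width; the kernel derives
 * the width (48 or 57 bits, depending on LA57) from the vCPU state.
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_canonical(uint64_t la, int va_bits)
{
	int shift = 64 - va_bits;

	return (uint64_t)((int64_t)(la << shift) >> shift) == la;
}

static bool address_is_noncanonical(uint64_t la, int va_bits)
{
	return !is_canonical(la, va_bits);
}

/*
 * With 4-level paging (va_bits == 48), 0x00007fffffffffff and
 * 0xffff800000000000 are canonical while 0x0000800000000000 is not.
 */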