15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/highmem.h>
18 #include <linux/hrtimer.h>
19 #include <linux/kernel.h>
20 #include <linux/kvm_host.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
25 #include <linux/objtool.h>
26 #include <linux/sched.h>
27 #include <linux/sched/smt.h>
28 #include <linux/slab.h>
29 #include <linux/tboot.h>
30 #include <linux/trace_events.h>
31 #include <linux/entry-kvm.h>
36 #include <asm/cpu_device_id.h>
37 #include <asm/debugreg.h>
39 #include <asm/fpu/api.h>
40 #include <asm/fpu/xstate.h>
41 #include <asm/idtentry.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/reboot.h>
45 #include <asm/perf_event.h>
46 #include <asm/mmu_context.h>
47 #include <asm/mshyperv.h>
48 #include <asm/mwait.h>
49 #include <asm/spec-ctrl.h>
75 static const struct x86_cpu_id vmx_cpu_id[] = {
76 X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
79 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
129 #define MSR_BITMAP_MODE_X2APIC 1
130 #define MSR_BITMAP_MODE_X2APIC_APICV 2
132 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
144 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
145 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
146 #define KVM_VM_CR0_ALWAYS_ON \
147 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
149 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
150 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
151 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
153 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
155 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
156 RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
157 RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
158 RTIT_STATUS_BYTECNT))
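/* The mask above covers every bit outside the architecturally defined RTIT_STATUS fields; a guest WRMSR that sets any of these reserved bits is rejected in the MSR handlers below. */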
176 MSR_IA32_SYSENTER_CS,
177 MSR_IA32_SYSENTER_ESP,
178 MSR_IA32_SYSENTER_EIP,
180 MSR_CORE_C3_RESIDENCY,
181 MSR_CORE_C6_RESIDENCY,
182 MSR_CORE_C7_RESIDENCY,
225 static const struct {
229 [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
230 [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
231 [VMENTER_L1D_FLUSH_COND] = {"cond", true},
232 [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
233 [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
234 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
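/* Table mapping the vmentry_l1d_flush module parameter strings to the L1D flush mitigation modes; the trailing bool marks entries that may be selected via the parameter, while the last two are reporting-only states. */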
237 #define L1D_CACHE_ORDER 4
245 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
246 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
251 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
256 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
261 if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
262 switch (l1tf_mitigation) {
263 case L1TF_MITIGATION_OFF:
264 l1tf = VMENTER_L1D_FLUSH_NEVER;
266 case L1TF_MITIGATION_FLUSH_NOWARN:
267 case L1TF_MITIGATION_FLUSH:
268 case L1TF_MITIGATION_FLUSH_NOSMT:
269 l1tf = VMENTER_L1D_FLUSH_COND;
271 case L1TF_MITIGATION_FULL:
272 case L1TF_MITIGATION_FULL_FORCE:
273 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
276 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
277 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
281 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
302 l1tf_vmx_mitigation = l1tf;
304 if (l1tf != VMENTER_L1D_FLUSH_NEVER)
305 static_branch_enable(&vmx_l1d_should_flush);
307 static_branch_disable(&vmx_l1d_should_flush);
309 if (l1tf == VMENTER_L1D_FLUSH_COND)
310 static_branch_enable(&vmx_l1d_flush_cond);
312 static_branch_disable(&vmx_l1d_flush_cond);
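/* Two static keys gate the mitigation: vmx_l1d_should_flush enables flushing on VM-entry at all, and vmx_l1d_flush_cond additionally makes the flush conditional on l1tf_flush_l1d having been set since the last entry. */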
338 if (!boot_cpu_has(X86_BUG_L1TF))
347 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
352 mutex_lock(&vmx_l1d_flush_mutex);
354 mutex_unlock(&vmx_l1d_flush_mutex);
361 return sysfs_emit(s, "???\n");
373 msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
375 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
401 !boot_cpu_has_bug(X86_BUG_MDS) &&
402 !boot_cpu_has_bug(X86_BUG_TAA);
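/* FB_CLEAR behaviour may be left disabled for the guest when it either enumerates FB_CLEAR itself or reports immunity (MDS_NO, TAA_NO, PSDP_NO, FBSDP_NO and SBDR_SSDP_NO) in IA32_ARCH_CAPABILITIES, as checked below. */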
409 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
410 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
411 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
412 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
413 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
414 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
428 #define vmx_insn_failed(fmt...) \
431 pr_warn_ratelimited(fmt); \
439 #ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
445 instrumentation_begin();
447 instrumentation_end();
496 #define VMX_SEGMENT_FIELD(seg) \
497 [VCPU_SREG_##seg] = { \
498 .selector = GUEST_##seg##_SELECTOR, \
499 .base = GUEST_##seg##_BASE, \
500 .limit = GUEST_##seg##_LIMIT, \
501 .ar_bytes = GUEST_##seg##_AR_BYTES, \
527 #if IS_ENABLED(CONFIG_HYPERV)
528 static struct kvm_x86_ops vmx_x86_ops __initdata;
533 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
535 struct hv_enlightened_vmcs *evmcs;
536 hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
538 if (partition_assist_page == INVALID_PAGE)
543 evmcs->partition_assist_page = partition_assist_page;
544 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
545 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
554 if (!enlightened_vmcs)
561 if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
562 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
566 for_each_online_cpu(cpu) {
567 if (!hv_get_vp_assist_page(cpu)) {
568 enlightened_vmcs = false;
573 if (enlightened_vmcs) {
574 pr_info("Using Hyper-V Enlightened VMCS\n");
575 static_branch_enable(&__kvm_is_using_evmcs);
578 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
579 vmx_x86_ops.enable_l2_tlb_flush
580 = hv_enable_l2_tlb_flush;
583 enlightened_vmcs = false;
589 struct hv_vp_assist_page *vp_ap;
599 vp_ap = hv_get_vp_assist_page(smp_processor_id());
600 if (WARN_ON_ONCE(!vp_ap))
607 vp_ap->nested_control.features.directhypercall = 0;
608 vp_ap->current_nested_vmcs = 0;
609 vp_ap->enlighten_vmentry = 0;
654 u32 eax = cpuid_eax(0x00000001), i;
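/* Mask the reserved bits of CPUID.1.EAX before comparing the family/model/stepping signature against an erratum list (assumption: this is the broken-VMX-preemption-timer check). */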
657 eax &= ~(0x3U << 14 | 0xfU << 28);
686 case 0x800 ... 0x8ff:
689 case MSR_IA32_RTIT_STATUS:
690 case MSR_IA32_RTIT_OUTPUT_BASE:
691 case MSR_IA32_RTIT_OUTPUT_MASK:
692 case MSR_IA32_RTIT_CR3_MATCH:
693 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
697 case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
698 case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
699 case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
700 case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
701 case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
708 WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
750 asm goto("1: vmxoff\n\t"
751 _ASM_EXTABLE(1b, %l[fault])
752 ::: "cc", "memory" : fault);
754 cr4_clear_bits(X86_CR4_VMXE);
758 cr4_clear_bits(X86_CR4_VMXE);
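/* VMXOFF can fault if VMX is already disabled (e.g. after an emergency disable); the extable entry above routes such faults to the fault label so CR4.VMXE is still cleared instead of oopsing. */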
764 int cpu = raw_smp_processor_id();
767 kvm_rebooting = true;
775 if (!(__read_cr4() & X86_CR4_VMXE))
778 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
788 int cpu = raw_smp_processor_id();
793 per_cpu(current_vmcs, cpu) = NULL;
819 smp_call_function_single(cpu,
878 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
879 (1u << DB_VECTOR) | (1u << AC_VECTOR);
887 eb |= (1u << GP_VECTOR);
888 if ((vcpu->guest_debug &
889 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
890 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
891 eb |= 1u << BP_VECTOR;
895 eb &= ~(1u << PF_VECTOR);
905 int mask = 0, match = 0;
915 mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
916 match = PFERR_PRESENT_MASK;
927 if (vcpu->arch.xfd_no_write_intercept)
928 eb |= (1u << NM_VECTOR);
938 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
946 unsigned int flags = 0;
963 unsigned long entry, unsigned long exit)
965 vm_entry_controls_clearbit(vmx, entry);
966 vm_exit_controls_clearbit(vmx, exit);
973 for (i = 0; i < m->nr; ++i) {
974 if (m->val[i].index == msr)
989 VM_ENTRY_LOAD_IA32_EFER,
990 VM_EXIT_LOAD_IA32_EFER);
994 case MSR_CORE_PERF_GLOBAL_CTRL:
997 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
998 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1007 m->guest.val[i] = m->guest.val[m->guest.nr];
1016 m->host.val[i] = m->host.val[m->host.nr];
1021 unsigned long entry, unsigned long exit,
1022 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1023 u64 guest_val, u64 host_val)
1026 if (host_val_vmcs != HOST_IA32_EFER)
1028 vm_entry_controls_setbit(vmx, entry);
1029 vm_exit_controls_setbit(vmx, exit);
1033 u64 guest_val, u64 host_val, bool entry_only)
1042 VM_ENTRY_LOAD_IA32_EFER,
1043 VM_EXIT_LOAD_IA32_EFER,
1046 guest_val, host_val);
1050 case MSR_CORE_PERF_GLOBAL_CTRL:
1053 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1054 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1055 GUEST_IA32_PERF_GLOBAL_CTRL,
1056 HOST_IA32_PERF_GLOBAL_CTRL,
1057 guest_val, host_val);
1061 case MSR_IA32_PEBS_ENABLE:
1067 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1076 printk_once(KERN_WARNING "Not enough msr switch entries. "
1077 "Can't add msr %x\n", msr);
1084 m->guest.val[i].index = msr;
1085 m->guest.val[i].value = guest_val;
1094 m->host.val[j].index = msr;
1095 m->host.val[j].value = host_val;
1100 u64 guest_efer = vmx->vcpu.arch.efer;
1101 u64 ignore_bits = 0;
1106 guest_efer |= EFER_NX;
1111 ignore_bits |= EFER_SCE;
1112 #ifdef CONFIG_X86_64
1113 ignore_bits |= EFER_LMA | EFER_LME;
1115 if (guest_efer & EFER_LMA)
1116 ignore_bits &= ~(u64)EFER_SCE;
1126 if (!(guest_efer & EFER_LMA))
1127 guest_efer &= ~EFER_LME;
1142 guest_efer &= ~ignore_bits;
1151 #ifdef CONFIG_X86_32
1157 static unsigned long segment_base(u16 selector)
1159 struct desc_struct *table;
1162 if (!(selector & ~SEGMENT_RPL_MASK))
1165 table = get_current_gdt_ro();
1167 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1168 u16 ldt_selector = kvm_read_ldt();
1170 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1173 table = (struct desc_struct *)segment_base(ldt_selector);
1175 v = get_desc_base(&table[selector >> 3]);
1196 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1197 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1198 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1199 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1200 for (i = 0; i < addr_range; i++) {
1201 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1202 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1210 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1211 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1212 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1213 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1214 for (i = 0; i < addr_range; i++) {
1215 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1216 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1231 wrmsrl(MSR_IA32_RTIT_CTL, 0);
1256 unsigned long fs_base, unsigned long gs_base)
1258 if (unlikely(fs_sel != host->fs_sel)) {
1265 if (unlikely(gs_sel != host->gs_sel)) {
1272 if (unlikely(fs_base != host->fs_base)) {
1276 if (unlikely(gs_base != host->gs_base)) {
1286 #ifdef CONFIG_X86_64
1287 int cpu = raw_smp_processor_id();
1324 host_state->ldt_sel = kvm_read_ldt();
1326 #ifdef CONFIG_X86_64
1327 savesegment(ds, host_state->ds_sel);
1328 savesegment(es, host_state->es_sel);
1330 gs_base = cpu_kernelmode_gs_base(cpu);
1331 if (likely(is_64bit_mm(current->mm))) {
1332 current_save_fsgs();
1333 fs_sel = current->thread.fsindex;
1334 gs_sel = current->thread.gsindex;
1335 fs_base = current->thread.fsbase;
1336 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1340 fs_base = read_msr(MSR_FS_BASE);
1341 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1344 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1365 ++vmx->vcpu.stat.host_state_reload;
1367 #ifdef CONFIG_X86_64
1368 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1371 kvm_load_ldt(host_state->ldt_sel);
1372 #ifdef CONFIG_X86_64
1373 load_gs_index(host_state->gs_sel);
1375 loadsegment(gs, host_state->gs_sel);
1378 if (host_state->fs_sel & 7)
1379 loadsegment(fs, host_state->fs_sel);
1380 #ifdef CONFIG_X86_64
1381 if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1382 loadsegment(ds, host_state->ds_sel);
1383 loadsegment(es, host_state->es_sel);
1386 invalidate_tss_limit();
1387 #ifdef CONFIG_X86_64
1388 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1390 load_fixmap_gdt(raw_smp_processor_id());
1395 #ifdef CONFIG_X86_64
1396 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1400 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1402 return vmx->msr_guest_kernel_gs_base;
1405 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1409 wrmsrl(MSR_KERNEL_GS_BASE, data);
1411 vmx->msr_guest_kernel_gs_base = data;
1422 if (!already_loaded) {
1424 local_irq_disable();
1435 &per_cpu(loaded_vmcss_on_cpu, cpu));
1439 prev = per_cpu(current_vmcs, cpu);
1451 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1452 indirect_branch_prediction_barrier();
1455 if (!already_loaded) {
1456 void *gdt = get_current_gdt_ro();
1462 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1469 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1472 if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1475 (unsigned long)(cpu_entry_stack(cpu) + 1));
1530 unsigned long old_rflags;
1548 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1552 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1563 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1566 if (interruptibility & GUEST_INTR_STATE_STI)
1567 ret |= KVM_X86_SHADOW_INT_STI;
1568 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1569 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1576 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1577 u32 interruptibility = interruptibility_old;
1579 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1581 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1582 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1583 else if (mask & KVM_X86_SHADOW_INT_STI)
1584 interruptibility |= GUEST_INTR_STATE_STI;
1586 if ((interruptibility != interruptibility_old))
1587 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1593 unsigned long value;
1615 if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1616 !(data & RTIT_CTL_FABRIC_EN) &&
1618 PT_CAP_single_range_output))
1625 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1626 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1627 !test_bit((data & RTIT_CTL_MTC_RANGE) >>
1628 RTIT_CTL_MTC_RANGE_OFFSET, &value))
1631 PT_CAP_cycle_thresholds);
1632 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1633 !test_bit((data & RTIT_CTL_CYC_THRESH) >>
1634 RTIT_CTL_CYC_THRESH_OFFSET, &value))
1636 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1637 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1638 !test_bit((data & RTIT_CTL_PSB_FREQ) >>
1639 RTIT_CTL_PSB_FREQ_OFFSET, &value))
1646 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1649 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1652 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1655 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
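/* Each frequency field (MTC, CYC threshold, PSB) and every address-filter range written to RTIT_CTL is validated against the capabilities reported by intel_pt_validate_cap() before the guest value is accepted. */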
1663 void *insn, int insn_len)
1682 unsigned long rip, orig_rip;
1693 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1694 exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1717 "skipping instruction after SGX enclave VM-Exit");
1720 rip = orig_rip + instr_len;
1721 #ifdef CONFIG_X86_64
1727 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1767 (!vcpu->arch.exception.pending ||
1768 vcpu->arch.exception.vector == DB_VECTOR) &&
1769 (!vcpu->arch.exception_vmexit.pending ||
1770 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1772 kvm_make_request(KVM_REQ_EVENT, vcpu);
1793 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1794 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1799 struct kvm_queued_exception *ex = &vcpu->arch.exception;
1800 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1805 if (ex->has_error_code) {
1816 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
1817 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1823 inc_eip = vcpu->arch.event_exit_inst_len;
1832 vmx->vcpu.arch.event_exit_inst_len);
1833 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1835 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1843 bool load_into_hardware)
1862 #ifdef CONFIG_X86_64
1863 bool load_syscall_msrs;
1870 (vmx->vcpu.arch.efer & EFER_SCE);
1925 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
1935 #define KVM_SUPPORTED_FEATURE_CONTROL (FEAT_CTL_LOCKED | \
1936 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | \
1937 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1938 FEAT_CTL_SGX_LC_ENABLED | \
1939 FEAT_CTL_SGX_ENABLED | \
1940 FEAT_CTL_LMCE_ENABLED)
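/* The mask above lists the MSR_IA32_FEAT_CTL bits KVM tolerates from userspace or the guest; setting anything outside it fails the validity check in the MSR handler. */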
1943 struct msr_data *msr)
1945 uint64_t valid_bits;
1954 if (!msr->host_initiated &&
1958 if (msr->host_initiated)
1963 return !(msr->data & ~valid_bits);
1968 switch (msr->index) {
1983 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1989 switch (msr_info->index) {
1990 #ifdef CONFIG_X86_64
1997 case MSR_KERNEL_GS_BASE:
1998 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2003 case MSR_IA32_TSX_CTRL:
2004 if (!msr_info->host_initiated &&
2005 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2008 case MSR_IA32_UMWAIT_CONTROL:
2014 case MSR_IA32_SPEC_CTRL:
2015 if (!msr_info->host_initiated &&
2021 case MSR_IA32_SYSENTER_CS:
2024 case MSR_IA32_SYSENTER_EIP:
2025 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2027 case MSR_IA32_SYSENTER_ESP:
2028 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2030 case MSR_IA32_BNDCFGS:
2032 (!msr_info->host_initiated &&
2037 case MSR_IA32_MCG_EXT_CTL:
2038 if (!msr_info->host_initiated &&
2040 FEAT_CTL_LMCE_ENABLED))
2042 msr_info->data = vcpu->arch.mcg_ext_ctl;
2044 case MSR_IA32_FEAT_CTL:
2047 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2048 if (!msr_info->host_initiated &&
2052 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2060 #ifdef CONFIG_KVM_HYPERV
2068 if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
2073 case MSR_IA32_RTIT_CTL:
2078 case MSR_IA32_RTIT_STATUS:
2083 case MSR_IA32_RTIT_CR3_MATCH:
2086 PT_CAP_cr3_filtering))
2090 case MSR_IA32_RTIT_OUTPUT_BASE:
2093 PT_CAP_topa_output) &&
2095 PT_CAP_single_range_output)))
2099 case MSR_IA32_RTIT_OUTPUT_MASK:
2102 PT_CAP_topa_output) &&
2104 PT_CAP_single_range_output)))
2108 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2109 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2118 case MSR_IA32_DEBUGCTLMSR:
2119 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
2125 msr_info->data = msr->data;
2137 #ifdef CONFIG_X86_64
2141 return (unsigned long)data;
2148 if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2149 (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2150 debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2154 debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
2164 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2169 u32 msr_index = msr_info->index;
2170 u64 data = msr_info->data;
2173 switch (msr_index) {
2177 #ifdef CONFIG_X86_64
2186 case MSR_KERNEL_GS_BASE:
2187 vmx_write_guest_kernel_gs_base(vmx, data);
2203 vcpu->arch.xfd_no_write_intercept = true;
2208 case MSR_IA32_SYSENTER_CS:
2213 case MSR_IA32_SYSENTER_EIP:
2220 case MSR_IA32_SYSENTER_ESP:
2227 case MSR_IA32_DEBUGCTLMSR: {
2231 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
2233 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2234 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2241 VM_EXIT_SAVE_DEBUG_CONTROLS)
2246 (data & DEBUGCTLMSR_LBR))
2250 case MSR_IA32_BNDCFGS:
2252 (!msr_info->host_initiated &&
2256 (data & MSR_IA32_BNDCFGS_RSVD))
2266 case MSR_IA32_UMWAIT_CONTROL:
2271 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2276 case MSR_IA32_SPEC_CTRL:
2277 if (!msr_info->host_initiated &&
2304 case MSR_IA32_TSX_CTRL:
2305 if (!msr_info->host_initiated &&
2306 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2308 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2311 case MSR_IA32_CR_PAT:
2317 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2323 case MSR_IA32_MCG_EXT_CTL:
2324 if ((!msr_info->host_initiated &&
2325 !(to_vmx(vcpu)->msr_ia32_feature_control &
2326 FEAT_CTL_LMCE_ENABLED)) ||
2327 (data & ~MCG_EXT_CTL_LMCE_EN))
2329 vcpu->arch.mcg_ext_ctl = data;
2331 case MSR_IA32_FEAT_CTL:
2336 if (msr_info->host_initiated && data == 0)
2342 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2354 if (!msr_info->host_initiated &&
2360 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2363 if (!msr_info->host_initiated)
2368 case MSR_IA32_RTIT_CTL:
2377 case MSR_IA32_RTIT_STATUS:
2384 case MSR_IA32_RTIT_CR3_MATCH:
2388 PT_CAP_cr3_filtering))
2392 case MSR_IA32_RTIT_OUTPUT_BASE:
2396 PT_CAP_topa_output) &&
2398 PT_CAP_single_range_output))
2404 case MSR_IA32_RTIT_OUTPUT_MASK:
2408 PT_CAP_topa_output) &&
2410 PT_CAP_single_range_output))
2414 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2417 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2427 case MSR_IA32_PERF_CAPABILITIES:
2437 if (data & PERF_CAP_PEBS_FORMAT) {
2438 if ((data & PERF_CAP_PEBS_MASK) !=
2461 if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2469 unsigned long guest_owned_bits;
2475 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2478 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2480 case VCPU_EXREG_PDPTR:
2484 case VCPU_EXREG_CR0:
2485 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2487 vcpu->arch.cr0 &= ~guest_owned_bits;
2488 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2490 case VCPU_EXREG_CR3:
2495 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2498 case VCPU_EXREG_CR4:
2499 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2501 vcpu->arch.cr4 &= ~guest_owned_bits;
2502 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2505 KVM_BUG_ON(1, vcpu->kvm);
2518 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2529 if (boot_cpu_data.x86 == 0x6) {
2530 switch (boot_cpu_data.x86_model) {
2531 case INTEL_FAM6_NEHALEM_EP:
2532 case INTEL_FAM6_NEHALEM:
2533 case INTEL_FAM6_WESTMERE:
2534 case INTEL_FAM6_WESTMERE_EP:
2535 case INTEL_FAM6_NEHALEM_EX:
2547 u32 vmx_msr_low, vmx_msr_high;
2548 u32 ctl = ctl_min | ctl_opt;
2550 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2552 ctl &= vmx_msr_high;
2567 rdmsrl(msr, allowed);
2569 return ctl_opt & allowed;
2575 u32 vmx_msr_low, vmx_msr_high;
2576 u32 _pin_based_exec_control = 0;
2577 u32 _cpu_based_exec_control = 0;
2578 u32 _cpu_based_2nd_exec_control = 0;
2579 u64 _cpu_based_3rd_exec_control = 0;
2580 u32 _vmexit_control = 0;
2581 u32 _vmentry_control = 0;
2593 } const vmcs_entry_exit_pairs[] = {
2594 { VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2595 { VM_ENTRY_LOAD_IA32_PAT, VM_EXIT_LOAD_IA32_PAT },
2596 { VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER },
2597 { VM_ENTRY_LOAD_BNDCFGS, VM_EXIT_CLEAR_BNDCFGS },
2598 { VM_ENTRY_LOAD_IA32_RTIT_CTL, VM_EXIT_CLEAR_IA32_RTIT_CTL },
2601 memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2605 MSR_IA32_VMX_PROCBASED_CTLS,
2606 &_cpu_based_exec_control))
2608 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2611 MSR_IA32_VMX_PROCBASED_CTLS2,
2612 &_cpu_based_2nd_exec_control))
2615 #ifndef CONFIG_X86_64
2616 if (!(_cpu_based_2nd_exec_control &
2617 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2618 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2621 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2622 _cpu_based_2nd_exec_control &= ~(
2623 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2624 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2625 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2627 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2628 &vmx_cap->ept, &vmx_cap->vpid);
2630 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2632 pr_warn_once("EPT CAP should not exist if not support "
2633 "1-setting enable EPT VM-execution control\n");
2640 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2642 pr_warn_once("VPID CAP should not exist if not support "
2643 "1-setting enable VPID VM-execution control\n");
2652 _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2654 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2655 _cpu_based_3rd_exec_control =
2657 MSR_IA32_VMX_PROCBASED_CTLS3);
2661 MSR_IA32_VMX_EXIT_CTLS,
2667 MSR_IA32_VMX_PINBASED_CTLS,
2668 &_pin_based_exec_control))
2672 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2673 if (!(_cpu_based_2nd_exec_control &
2674 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2675 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2679 MSR_IA32_VMX_ENTRY_CTLS,
2683 for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
2684 u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
2685 u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
2687 if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
2690 pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2691 _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
2696 _vmentry_control &= ~n_ctrl;
2697 _vmexit_control &= ~x_ctrl;
2700 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2703 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2706 #ifdef CONFIG_X86_64
2708 if (vmx_msr_high & (1u<<16))
2713 if (((vmx_msr_high >> 18) & 15) != 6)
2716 rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
2718 vmcs_conf->size = vmx_msr_high & 0x1fff;
2719 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
2729 vmcs_conf->misc = misc_msr;
2731 #if IS_ENABLED(CONFIG_HYPERV)
2732 if (enlightened_vmcs)
2741 int cpu = smp_processor_id();
2744 pr_err("VMX not supported by CPU %d\n", cpu);
2748 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2749 !this_cpu_has(X86_FEATURE_VMX)) {
2750 pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
2770 int cpu = raw_smp_processor_id();
2778 pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2784 pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
2794 cr4_set_bits(X86_CR4_VMXE);
2796 asm goto("1: vmxon %[vmxon_pointer]\n\t"
2797 _ASM_EXTABLE(1b, %l[fault])
2798 : : [vmxon_pointer] "m"(vmxon_pointer)
2803 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2804 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2805 cr4_clear_bits(X86_CR4_VMXE);
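/* A faulting VMXON almost always means VMX was not properly enabled in MSR_IA32_FEAT_CTL (or is blocked by firmware); the WARN above dumps that MSR and CR4.VMXE is rolled back. */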
2812 int cpu = raw_smp_processor_id();
2813 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2816 if (cr4_read_shadow() & X86_CR4_VMXE)
2826 intel_pt_handle_vmx(1);
2830 intel_pt_handle_vmx(0);
2842 int cpu = raw_smp_processor_id();
2845 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2859 intel_pt_handle_vmx(0);
2864 int node = cpu_to_node(cpu);
2868 pages = __alloc_pages_node(node, flags, 0);
2871 vmcs = page_address(pages);
2887 free_page((unsigned long)vmcs);
2920 __get_free_page(GFP_KERNEL_ACCOUNT);
2941 for_each_possible_cpu(cpu) {
2943 per_cpu(vmxarea, cpu) = NULL;
2951 for_each_possible_cpu(cpu) {
2973 per_cpu(vmxarea, cpu) = vmcs;
2979 struct kvm_segment *save)
2989 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
2990 save->selector &= ~SEGMENT_RPL_MASK;
2991 save->dpl = save->selector & SEGMENT_RPL_MASK;
2999 unsigned long flags;
3023 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3038 struct kvm_segment var = *save;
3041 if (seg == VCPU_SREG_CS)
3046 var.base = var.base & 0xffff0;
3056 if (save->base & 0xf)
3057 pr_warn_once("segment base is not paragraph aligned "
3058 "when entering protected mode (seg=%d)", seg);
3069 unsigned long flags;
3101 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3123 vcpu->arch.efer = efer;
3124 #ifdef CONFIG_X86_64
3125 if (efer & EFER_LMA)
3126 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3128 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3130 if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3138 #ifdef CONFIG_X86_64
3140 static void enter_lmode(struct kvm_vcpu *vcpu)
3147 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3148 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3151 (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3152 | VMX_AR_TYPE_BUSY_64_TSS);
3157 static void exit_lmode(struct kvm_vcpu *vcpu)
3196 struct kvm_mmu *mmu = vcpu->arch.mmu;
3197 u64 root_hpa = mmu->root.hpa;
3200 if (!VALID_PAGE(root_hpa))
3205 mmu->root_role.level));
3233 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3248 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3261 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3262 CPU_BASED_CR3_STORE_EXITING)
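/* With EPT enabled but no unrestricted guest, CR3 accesses must be intercepted while the guest runs unpaged (KVM supplies its own identity-mapped CR3 then); these two bits are toggled as CR0.PG changes. */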
3278 unsigned long hw_cr0, old_cr0_pg;
3289 hw_cr0 |= X86_CR0_WP;
3300 vcpu->arch.cr0 = cr0;
3303 #ifdef CONFIG_X86_64
3304 if (vcpu->arch.efer & EFER_LME) {
3305 if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3307 else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3337 if (!(cr0 & X86_CR0_PG)) {
3342 tmp = exec_controls_get(vmx);
3345 exec_controls_set(vmx, tmp);
3349 if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3356 if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3373 u64 eptp = VMX_EPTP_MT_WB;
3375 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3379 eptp |= VMX_EPTP_AD_ENABLE_BIT;
3388 struct kvm *kvm = vcpu->kvm;
3389 bool update_guest_cr3 = true;
3390 unsigned long guest_cr3;
3402 guest_cr3 = vcpu->arch.cr3;
3404 update_guest_cr3 = false;
3411 if (update_guest_cr3)
3423 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3436 unsigned long hw_cr4;
3443 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3452 if (cr4 & X86_CR4_UMIP) {
3453 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3454 hw_cr4 &= ~X86_CR4_UMIP;
3457 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3461 vcpu->arch.cr4 = cr4;
3467 hw_cr4 &= ~X86_CR4_PAE;
3468 hw_cr4 |= X86_CR4_PSE;
3469 } else if (!(cr4 & X86_CR4_PAE)) {
3470 hw_cr4 &= ~X86_CR4_PAE;
3486 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3492 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3503 if (seg == VCPU_SREG_TR
3514 var->unusable = (ar >> 16) & 1;
3515 var->type = ar & 15;
3516 var->s = (ar >> 4) & 1;
3517 var->dpl = (ar >> 5) & 3;
3525 var->present = !var->unusable;
3526 var->avl = (ar >> 12) & 1;
3527 var->l = (ar >> 13) & 1;
3528 var->db = (ar >> 14) & 1;
3529 var->g = (ar >> 15) & 1;
3534 struct kvm_segment s;
3551 return VMX_AR_DPL(ar);
3559 ar = var->type & 15;
3560 ar |= (var->s & 1) << 4;
3561 ar |= (var->dpl & 3) << 5;
3562 ar |= (var->present & 1) << 7;
3563 ar |= (var->avl & 1) << 12;
3564 ar |= (var->l & 1) << 13;
3565 ar |= (var->db & 1) << 14;
3566 ar |= (var->g & 1) << 15;
3567 ar |= (var->unusable || !var->present) << 16;
3581 if (seg == VCPU_SREG_TR)
3620 *db = (ar >> 14) & 1;
3621 *l = (ar >> 13) & 1;
3650 struct kvm_segment var;
3655 if (seg == VCPU_SREG_CS)
3659 if (var.base != (var.selector << 4))
3661 if (var.limit != 0xffff)
3671 struct kvm_segment cs;
3672 unsigned int cs_rpl;
3675 cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3679 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3683 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3684 if (cs.dpl > cs_rpl)
3687 if (cs.dpl != cs_rpl)
3699 struct kvm_segment ss;
3700 unsigned int ss_rpl;
3703 ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3707 if (ss.type != 3 && ss.type != 7)
3711 if (ss.dpl != ss_rpl)
3721 struct kvm_segment var;
3725 rpl = var.selector & SEGMENT_RPL_MASK;
3733 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3746 struct kvm_segment tr;
3752 if (tr.selector & SEGMENT_TI_MASK)
3754 if (tr.type != 3 && tr.type != 11)
3764 struct kvm_segment ldtr;
3770 if (ldtr.selector & SEGMENT_TI_MASK)
3782 struct kvm_segment cs, ss;
3787 return ((cs.selector & SEGMENT_RPL_MASK) ==
3788 (ss.selector & SEGMENT_RPL_MASK));
3843 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3847 for (i = 0; i < 3; i++) {
3848 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3852 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3853 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3857 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3871 mutex_lock(&kvm->slots_lock);
3880 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3883 if (IS_ERR(uaddr)) {
3889 for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
3890 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3891 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3892 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
3900 mutex_unlock(&kvm->slots_lock);
3913 if (seg == VCPU_SREG_CS)
3925 spin_lock(&vmx_vpid_lock);
3926 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3927 if (vpid < VMX_NR_VPIDS)
3928 __set_bit(vpid, vmx_vpid_bitmap);
3931 spin_unlock(&vmx_vpid_lock);
3939 spin_lock(&vmx_vpid_lock);
3940 __clear_bit(vpid, vmx_vpid_bitmap);
3941 spin_unlock(&vmx_vpid_lock);
3952 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3954 if (evmcs->hv_enlightenments_control.msr_bitmap)
3955 evmcs->hv_clean_fields &=
3956 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
3979 if (idx != -ENOENT) {
3989 vmx_set_msr_bitmap_read(msr_bitmap, msr);
3995 vmx_set_msr_bitmap_write(msr_bitmap, msr);
4000 vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4003 vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4023 if (idx != -ENOENT) {
4032 vmx_set_msr_bitmap_read(msr_bitmap, msr);
4035 vmx_set_msr_bitmap_write(msr_bitmap, msr);
4045 const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4046 const int write_idx = read_idx + (0x800 / sizeof(u64));
4055 (secondary_exec_controls_get(vmx) &
4056 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4079 msr_bitmap[read_idx] = ~0ull;
4080 msr_bitmap[write_idx] = ~0ull;
4129 vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
4131 return ((rvi & 0xf0) > (vppr & 0xf0));
4166 if (vcpu->mode == IN_GUEST_MODE) {
4194 __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4218 kvm_make_request(KVM_REQ_EVENT, vcpu);
4230 smp_mb__after_atomic();
4255 if (!vcpu->arch.apic->apicv_active)
4276 int trig_mode, int vector)
4278 struct kvm_vcpu *vcpu = apic->vcpu;
4282 kvm_make_request(KVM_REQ_EVENT, vcpu);
4285 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4300 unsigned long cr0, cr3, cr4;
4303 WARN_ON(cr0 & X86_CR0_TS);
4315 cr4 = cr4_read_shadow();
4320 #ifdef CONFIG_X86_64
4339 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4348 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4351 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4355 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4356 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4365 struct kvm_vcpu *vcpu = &vmx->vcpu;
4368 ~vcpu->arch.cr4_guest_rsvd_bits;
4374 vcpu->arch.cr4_guest_owned_bits &=
4376 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4384 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4387 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4390 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4392 return pin_based_exec_ctrl;
4400 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4401 VM_ENTRY_LOAD_IA32_RTIT_CTL);
4405 vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4406 VM_ENTRY_LOAD_IA32_EFER |
4407 VM_ENTRY_IA32E_MODE);
4410 vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4412 return vmentry_ctrl;
4423 vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4424 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4427 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4428 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4431 vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4434 return vmexit_ctrl &
4435 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
4450 secondary_exec_controls_setbit(vmx,
4451 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4452 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4454 tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4456 secondary_exec_controls_clearbit(vmx,
4457 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4458 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4460 tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4474 exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4475 CPU_BASED_USE_IO_BITMAPS |
4476 CPU_BASED_MONITOR_TRAP_FLAG |
4477 CPU_BASED_PAUSE_EXITING);
4480 exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4481 CPU_BASED_NMI_WINDOW_EXITING);
4483 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4484 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4487 exec_control &= ~CPU_BASED_TPR_SHADOW;
4489 #ifdef CONFIG_X86_64
4490 if (exec_control & CPU_BASED_TPR_SHADOW)
4491 exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4492 CPU_BASED_CR8_STORE_EXITING);
4494 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4495 CPU_BASED_CR8_LOAD_EXITING;
4499 exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4500 CPU_BASED_CR3_STORE_EXITING |
4501 CPU_BASED_INVLPG_EXITING);
4503 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4504 CPU_BASED_MONITOR_EXITING);
4506 exec_control &= ~CPU_BASED_HLT_EXITING;
4507 return exec_control;
4519 exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4521 return exec_control;
4531 u32 control, bool enabled, bool exiting)
4541 if (enabled == exiting)
4568 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4570 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4573 if (cpu_has_vmx_##name()) { \
4574 if (kvm_is_governed_feature(X86_FEATURE_##feat_name)) \
4575 __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \
4577 __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \
4578 vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4579 __enabled, exiting); \
4584 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4585 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4587 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4588 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
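/* The macros above collapse a common pattern: enable a secondary execution control only when the CPU supports the feature and the guest's CPUID (or the governed-feature tracking) exposes it, inverting the sense for the *_EXITING style controls. */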
4592 struct kvm_vcpu *vcpu = &vmx->vcpu;
4597 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4599 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4601 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4603 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4607 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4609 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4611 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4612 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4613 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4619 exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4623 exec_control &= ~SECONDARY_EXEC_DESC;
4630 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4637 if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4638 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4651 bool rdpid_or_rdtscp_enabled =
4656 SECONDARY_EXEC_ENABLE_RDTSCP,
4657 rdpid_or_rdtscp_enabled, false);
4666 ENABLE_USR_WAIT_PAUSE, false);
4668 if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4669 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4672 exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4674 return exec_control;
4679 return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4693 pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
4707 #define VMX_XSS_EXIT_BITMAP 0
4711 struct kvm *kvm = vmx->vcpu.kvm;
4787 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4820 __pa(vmx->vcpu.arch.apic->regs));
4842 #ifdef CONFIG_KVM_HYPERV
4846 vcpu->arch.microcode_version = 0x100000000ULL;
4901 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4909 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4918 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4924 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4929 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4936 int irq = vcpu->arch.interrupt.nr;
4938 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4940 ++vcpu->stat.irq_injections;
4943 if (vcpu->arch.interrupt.soft)
4944 inc_eip = vcpu->arch.event_exit_inst_len;
4948 intr = irq | INTR_INFO_VALID_MASK;
4949 if (vcpu->arch.interrupt.soft) {
4950 intr |= INTR_TYPE_SOFT_INTR;
4952 vmx->vcpu.arch.event_exit_inst_len);
4954 intr |= INTR_TYPE_EXT_INTR;
4977 ++vcpu->stat.nmi_injections;
4986 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
5000 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5018 GUEST_INTR_STATE_NMI);
5021 GUEST_INTR_STATE_NMI);
5033 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5034 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5035 GUEST_INTR_STATE_NMI));
5057 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5082 mutex_lock(&kvm->slots_lock);
5085 mutex_unlock(&kvm->slots_lock);
5088 return PTR_ERR(ret);
5111 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5115 return !(vcpu->guest_debug &
5116 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5131 int vec, u32 err_code)
5137 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5139 if (vcpu->arch.halt_request) {
5140 vcpu->arch.halt_request = 0;
5176 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5186 struct kvm_run *kvm_run = vcpu->run;
5187 u32 intr_info, ex_no, error_code;
5188 unsigned long cr2, dr6;
5217 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5218 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5240 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5241 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5242 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5243 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5244 vcpu->run->internal.ndata = 4;
5245 vcpu->run->internal.data[0] = vect_info;
5246 vcpu->run->internal.data[1] = intr_info;
5247 vcpu->run->internal.data[2] = error_code;
5248 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5254 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5266 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5274 if (!(vcpu->guest_debug &
5275 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5302 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5304 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5309 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5310 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5318 vmx->vcpu.arch.event_exit_inst_len =
5320 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5322 kvm_run->debug.arch.exception = ex_no;
5339 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5340 kvm_run->ex.exception = ex_no;
5341 kvm_run->ex.error_code = error_code;
5349 ++vcpu->stat.irq_exits;
5355 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5356 vcpu->mmio_needed = 0;
5362 unsigned long exit_qualification;
5363 int size, in, string;
5367 string = (exit_qualification & 16) != 0;
5369 ++vcpu->stat.io_exits;
5374 port = exit_qualification >> 16;
5375 size = (exit_qualification & 7) + 1;
5376 in = (exit_qualification & 8) != 0;
5387 hypercall[0] = 0x0f;
5388 hypercall[1] = 0x01;
5389 hypercall[2] = 0xc1;
5397 unsigned long orig_val = val;
5423 unsigned long orig_val = val;
5482 if (cr8_prev <= cr8)
5489 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5495 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5522 vcpu->run->exit_reason = 0;
5523 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5551 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5552 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5553 vcpu->run->debug.arch.dr7 = dr7;
5555 vcpu->run->debug.arch.exception = DB_VECTOR;
5556 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5564 if (vcpu->guest_debug == 0) {
5565 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5572 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5593 get_debugreg(vcpu->arch.db[0], 0);
5594 get_debugreg(vcpu->arch.db[1], 1);
5595 get_debugreg(vcpu->arch.db[2], 2);
5596 get_debugreg(vcpu->arch.db[3], 3);
5597 get_debugreg(vcpu->arch.dr6, 6);
5600 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5601 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5607 set_debugreg(DR6_RESERVED, 6);
5623 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5625 kvm_make_request(KVM_REQ_EVENT, vcpu);
5627 ++vcpu->stat.irq_window_exits;
5643 int access_type, offset;
5652 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5653 (offset == APIC_EOI)) {
5692 bool has_error_code = false;
5695 int reason, type, idt_v, idt_index;
5704 if (reason == TASK_SWITCH_GATE && idt_v) {
5706 case INTR_TYPE_NMI_INTR:
5707 vcpu->arch.nmi_injected = false;
5710 case INTR_TYPE_EXT_INTR:
5711 case INTR_TYPE_SOFT_INTR:
5714 case INTR_TYPE_HARD_EXCEPTION:
5716 VECTORING_INFO_DELIVER_CODE_MASK) {
5717 has_error_code = true;
5722 case INTR_TYPE_SOFT_EXCEPTION:
5731 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5732 type != INTR_TYPE_EXT_INTR &&
5733 type != INTR_TYPE_NMI_INTR))
5741 type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5742 reason, has_error_code, error_code);
5762 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5769 ? PFERR_USER_MASK : 0;
5772 ? PFERR_WRITE_MASK : 0;
5775 ? PFERR_FETCH_MASK : 0;
5778 ? PFERR_PRESENT_MASK : 0;
5781 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
5813 trace_kvm_fast_mmio(gpa);
5825 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5826 ++vcpu->stat.nmi_window_exits;
5827 kvm_make_request(KVM_REQ_EVENT, vcpu);
5843 bool intr_window_requested;
5844 unsigned count = 130;
5846 intr_window_requested = exec_controls_get(vmx) &
5847 CPU_BASED_INTR_WINDOW_EXITING;
5853 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5864 if (vcpu->arch.halt_request) {
5865 vcpu->arch.halt_request = 0;
5874 if (__xfer_to_guest_mode_work_pending())
5902 trace_kvm_ple_window_update(vcpu->vcpu_id,
5918 trace_kvm_ple_window_update(vcpu->vcpu_id,
5949 u32 vmx_instruction_info;
5963 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5971 vmx_instruction_info, false,
5980 unsigned long exit_qualification;
5982 trace_kvm_pml_full(vcpu->vcpu_id);
5990 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5992 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5994 GUEST_INTR_STATE_NMI);
6010 return EXIT_FASTPATH_REENTER_GUEST;
6013 return EXIT_FASTPATH_NONE;
6032 #ifndef CONFIG_X86_SGX_KVM
6059 bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6061 ++vcpu->stat.notify_window_exits;
6067 if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6069 GUEST_INTR_STATE_NMI);
6071 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6073 vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6074 vcpu->run->notify.flags = context_invalid ?
6075 KVM_NOTIFY_CONTEXT_INVALID : 0;
6092 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6146 u64 *info1, u64 *info2,
6147 u32 *intr_info, u32 *error_code)
6157 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6170 __free_page(vmx->pml_pg);
6193 pml_buf = page_address(vmx->pml_pg);
6197 gpa = pml_buf[pml_idx];
6198 WARN_ON(gpa & (PAGE_SIZE - 1));
6208 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6210 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6211 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6212 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6217 pr_err("%s limit=0x%08x, base=0x%016lx\n",
6225 struct vmx_msr_entry *e;
6227 pr_err("MSR %s:\n", name);
6228 for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6229 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6235 u32 vmentry_ctl, vmexit_ctl;
6236 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6237 u64 tertiary_exec_control;
6242 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6248 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6249 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6253 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6255 secondary_exec_control = 0;
6258 tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6260 tertiary_exec_control = 0;
6262 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6264 pr_err("*** Guest State ***\n");
6265 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6268 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6270 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6272 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
6274 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
6277 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
6279 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
6281 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6295 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6296 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6297 else if (efer_slot >= 0)
6298 pr_err("EFER= 0x%016llx (autoload)\n",
6300 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6301 pr_err("EFER= 0x%016llx (effective)\n",
6302 vcpu->arch.efer | (EFER_LMA | EFER_LME));
6304 pr_err("EFER= 0x%016llx (effective)\n",
6305 vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6306 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6307 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6308 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
6312 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6313 pr_err("PerfGlobCtl = 0x%016llx\n",
6315 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6316 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6317 pr_err("Interruptibility = %08x ActivityState = %08x\n",
6320 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6321 pr_err("InterruptStatus = %04x\n",
6328 pr_err("*** Host State ***\n");
6329 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
6331 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6336 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6339 pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6341 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6344 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6348 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6349 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6350 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6351 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6353 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6354 pr_err("PerfGlobCtl = 0x%016llx\n",
6359 pr_err("*** Control State ***\n");
6360 pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6361 cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6362 pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6363 pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6364 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6368 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6372 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6376 pr_err(" reason=%08x qualification=%016lx\n",
6378 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6381 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6382 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6383 pr_err("TSC Multiplier = 0x%016llx\n",
6385 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6386 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6388 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6390 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6391 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6392 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6393 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6395 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6396 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6397 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6398 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6399 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6400 pr_err("PLE Gap=%08x Window=%08x\n",
6402 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6403 pr_err("Virtual processor ID = 0x%04x\n",
6416 u16 exit_handler_index;
6443 if (exit_reason.basic == EXIT_REASON_PML_FULL)
6444 goto unexpected_vmexit;
6485 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6486 vcpu->run->fail_entry.hardware_entry_failure_reason
6488 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6492 if (unlikely(vmx->fail)) {
6494 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6495 vcpu->run->fail_entry.hardware_entry_failure_reason
6497 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6508 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6509 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6510 exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6511 exit_reason.basic != EXIT_REASON_PML_FULL &&
6512 exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6513 exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6514 exit_reason.basic != EXIT_REASON_NOTIFY)) {
6517 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6518 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6519 vcpu->run->internal.data[0] = vectoring_info;
6520 vcpu->run->internal.data[1] = exit_reason.full;
6521 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6522 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
6523 vcpu->run->internal.data[ndata++] =
6526 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6527 vcpu->run->internal.ndata = ndata;
6536 vcpu->arch.nmi_pending) {
6543 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6544 "state on VCPU %d after 1 s timeout\n",
6545 __func__, vcpu->vcpu_id);
6550 if (exit_fastpath != EXIT_FASTPATH_NONE)
6554 goto unexpected_vmexit;
6555 #ifdef CONFIG_RETPOLINE
6556 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6558 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6560 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6562 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6564 else if (exit_reason.basic == EXIT_REASON_HLT)
6566 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6570 exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6573 goto unexpected_vmexit;
6578 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6581 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6582 vcpu->run->internal.suberror =
6583 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6584 vcpu->run->internal.ndata = 2;
6585 vcpu->run->internal.data[0] = exit_reason.full;
6586 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6598 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6600 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6602 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6626 if (static_branch_likely(&vmx_l1d_flush_cond)) {
6634 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6635 vcpu->arch.l1tf_flush_l1d = false;
6641 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6642 kvm_clear_cpu_l1tf_flush_l1d();
6648 vcpu->stat.l1d_flush++;
6650 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6651 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6657 "xorl %%eax, %%eax\n"
6658 ".Lpopulate_tlb:\n\t"
6659 "movzbl (%[flush_pages], %%" _ASM_AX
"), %%ecx\n\t"
6660 "addl $4096, %%eax\n\t"
6661 "cmpl %%eax, %[size]\n\t"
6662 "jne .Lpopulate_tlb\n\t"
6663 "xorl %%eax, %%eax\n\t"
6666 "xorl %%eax, %%eax\n"
6668 "movzbl (%[flush_pages], %%" _ASM_AX
"), %%ecx\n\t"
6669 "addl $64, %%eax\n\t"
6670 "cmpl %%eax, %[size]\n\t"
6671 "jne .Lfill_cache\n\t"
6675 : "eax", "ebx", "ecx", "edx");
6697 u32 sec_exec_control;
6712 sec_exec_control = secondary_exec_controls_get(vmx);
6713 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6714 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6718 WARN_ONCE(true, "Invalid local APIC state");
6725 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6726 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6734 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6740 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6743 secondary_exec_controls_set(vmx, sec_exec_control);
6750 const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
6751 struct kvm *kvm = vcpu->kvm;
6752 struct kvm_memslots *slots = kvm_memslots(kvm);
6753 struct kvm_memory_slot *slot;
6754 unsigned long mmu_seq;
6763 if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6764 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6773 slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6774 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6783 mmu_seq = kvm->mmu_invalidate_seq;
6792 if (is_error_noslot_pfn(pfn))
6795 read_lock(&vcpu->kvm->mmu_lock);
6796 if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
6797 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6798 read_unlock(&vcpu->kvm->mmu_lock);
6803 read_unlock(&vcpu->kvm->mmu_lock);
6827 if (max_isr != old) {
6829 status |= max_isr << 8;
6843 old = (u8)status & 0xff;
6844 if ((u8)vector != old) {
6846 status |= (u8)vector;
6869 bool got_posted_interrupt;
6880 smp_mb__after_atomic();
6881 got_posted_interrupt =
6885 got_posted_interrupt = false;
6905 else if (got_posted_interrupt)
6906 kvm_make_request(KVM_REQ_EVENT, vcpu);
6949 if (
vcpu->arch.guest_fpu.fpstate->xfd)
6950 rdmsrl(MSR_IA32_XFD_ERR,
vcpu->arch.guest_fpu.xfd_err);
6959 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6971 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
6975 "unexpected VM-Exit interrupt info: 0x%x", intr_info))
6982 vcpu->arch.at_instruction_boundary = true;
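/*
 * The flag set above records that this IRQ VM-Exit landed on an instruction
 * boundary; common x86 code uses it when accounting vCPU preemption
 * statistics.
 */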
7005 case MSR_IA32_SMBASE:
7006 if (!IS_ENABLED(CONFIG_KVM_SMM))
7015 case MSR_AMD64_VIRT_SPEC_CTRL:
7016 case MSR_AMD64_TSC_RATIO:
7029 bool idtv_info_valid;
7051 vector != DF_VECTOR && !idtv_info_valid)
7053 GUEST_INTR_STATE_NMI);
7057 & GUEST_INTR_STATE_NMI);
7060 ktime_to_ns(ktime_sub(ktime_get(),
7066 int instr_len_field,
7067 int error_code_field)
7071 bool idtv_info_valid;
7075 vcpu->arch.nmi_injected = false;
7079 if (!idtv_info_valid)
7082 kvm_make_request(KVM_REQ_EVENT, vcpu);
7088 case INTR_TYPE_NMI_INTR:
7089 vcpu->arch.nmi_injected = true;
7097 case INTR_TYPE_SOFT_EXCEPTION:
7100 case INTR_TYPE_HARD_EXCEPTION:
7107 case INTR_TYPE_SOFT_INTR:
7110 case INTR_TYPE_EXT_INTR:
7121 VM_EXIT_INSTRUCTION_LEN,
7122 IDT_VECTORING_ERROR_CODE);
7129 VM_ENTRY_INSTRUCTION_LEN,
7130 VM_ENTRY_EXCEPTION_ERROR_CODE);
7138 struct perf_guest_switch_msr *msrs;
7141 pmu->host_cross_mapped_mask = 0;
7142 if (pmu->pebs_enable & pmu->global_ctrl)
7146 msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7150 for (i = 0; i < nr_msrs; i++)
7151 if (msrs[i].host == msrs[i].guest)
7155 msrs[i].host, false);
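/*
 * Perf MSRs whose host and guest values match are dropped from the VMCS
 * atomic switch lists; the remainder are loaded/restored by hardware on
 * VM-Entry and VM-Exit via the call above.
 */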
7195 u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7197 if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7201 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7210 if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7212 native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
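/*
 * The host's SPEC_CTRL value is written back when it differs from the
 * guest's, or unconditionally under legacy KERNEL_IBRS, where the IBRS bit
 * must be re-asserted after running in a less privileged predictor mode.
 */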
7220 case EXIT_REASON_MSR_WRITE:
7222 case EXIT_REASON_PREEMPTION_TIMER:
7225 return EXIT_FASTPATH_NONE;
7234 guest_state_enter_irqoff();
7242 if (static_branch_unlikely(&vmx_l1d_should_flush))
7244 else if (static_branch_unlikely(&mmio_stale_data_clear) &&
7246 mds_clear_cpu_buffers();
7250 if (vcpu->arch.cr2 != native_read_cr2())
7251 native_write_cr2(vcpu->arch.cr2);
7256 vcpu->arch.cr2 = native_read_cr2();
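/*
 * CR2 is not context-switched by VMX hardware, so the guest value is
 * installed before entry only if it differs (CR2 writes are slow) and is
 * read back immediately after the run, before the host can fault.
 */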
7263 if (unlikely(vmx->fail)) {
7280 guest_state_exit_irqoff();
7286 unsigned long cr3, cr4;
7307 return EXIT_FASTPATH_NONE;
7310 trace_kvm_entry(vcpu);
7327 vcpu->arch.regs_dirty = 0;
7336 cr3 = __get_current_cr3_fast();
7342 cr4 = cr4_read_shadow();
7349 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7350 set_debugreg(vcpu->arch.dr6, 6);
7357 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7379 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7388 #ifndef CONFIG_X86_64
7397 loadsegment(ds, __USER_DS);
7398 loadsegment(es, __USER_DS);
7412 ++vcpu->stat.nested_run;
7417 if (unlikely(vmx->fail))
7418 return EXIT_FASTPATH_NONE;
7420 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7426 return EXIT_FASTPATH_NONE;
7434 return EXIT_FASTPATH_NONE;
7472 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7479 if (boot_cpu_has(X86_FEATURE_RTM)) {
7487 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7501 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7502 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7504 evmcs->hv_enlightenments_control.msr_bitmap = 1;
7512 #ifdef CONFIG_X86_64
7556 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7557 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7562 kvm->arch.pause_in_guest = true;
7564 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7565 switch (l1tf_mitigation) {
7566 case L1TF_MITIGATION_OFF:
7567 case L1TF_MITIGATION_FLUSH_NOWARN:
7570 case L1TF_MITIGATION_FLUSH:
7571 case L1TF_MITIGATION_FLUSH_NOSMT:
7572 case L1TF_MITIGATION_FULL:
7577 if (sched_smt_active())
7579 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7582 case L1TF_MITIGATION_FULL_FORCE:
7611 return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7614 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7618 return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
7620 return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
7636 SECONDARY_EXEC_SHADOW_VMCS |
7637 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7638 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7639 SECONDARY_EXEC_DESC;
7641 u32 cur_ctl = secondary_exec_controls_get(vmx);
7643 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7653 struct kvm_cpuid_entry2 *entry;
7658 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
7659 if (entry && (entry->_reg & (_cpuid_mask))) \
7660 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7690 #undef cr4_fixed1_update
7696 struct kvm_cpuid_entry2 *best = NULL;
7699 for (i = 0; i < PT_CPUID_LEAVES; i++) {
7703 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7704 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7705 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7706 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7711 PT_CAP_num_address_ranges);
7715 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7716 RTIT_CTL_BRANCH_EN);
7722 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7729 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7731 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7736 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7738 RTIT_CTL_MTC_RANGE);
7741 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7746 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7750 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7754 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7771 if (boot_cpu_has(X86_FEATURE_XSAVE) &&
7786 FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7787 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7790 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7791 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7796 if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7800 if (boot_cpu_has(X86_FEATURE_RTM)) {
7813 if (boot_cpu_has(X86_FEATURE_IBPB))
7817 if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
7831 FEAT_CTL_SGX_LC_ENABLED;
7834 ~FEAT_CTL_SGX_LC_ENABLED;
7843 struct x86_pmu_lbr lbr;
7844 u64 host_perf_cap = 0;
7849 if (boot_cpu_has(X86_FEATURE_PDCM))
7850 rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
7852 if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
7853 x86_perf_get_lbr(&lbr);
7859 perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
7881 perf_cap &= ~PERF_CAP_PEBS_BASELINE;
7945 unsigned short port;
7967 CPU_BASED_UNCOND_IO_EXITING);
7990 exception->vector = UD_VECTOR;
8039 #ifdef CONFIG_X86_64
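/*
 * Helper for scaling a guest TSC delta: computes (a << shift) / divisor
 * using the 128-bit dividend formed in RDX:RAX, and reports failure if the
 * quotient would not fit in 64 bits.
 */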
8041 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
8042 u64 divisor, u64 *result)
8044 u64 low = a << shift, high = a >> (64 - shift);
8047 if (high >= divisor)
8051 asm("divq %2\n\t" : "=a" (low), "=d" (high) :
8052 "rm" (divisor), "0" (low), "1" (high));
8058 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8062 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
8063 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8068 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
8072 if (delta_tsc > lapic_timer_advance_cycles)
8073 delta_tsc -= lapic_timer_advance_cycles;
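/*
 * The programmed deadline is pulled forward by the APIC timer advance
 * (converted to TSC cycles above) so the timer interrupt can be delivered
 * to the guest on time despite exit/injection latency.
 */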
8079 delta_tsc && u64_shl_div_u64(delta_tsc,
8081 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8094 *expired = !delta_tsc;
8098 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8127 if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8128 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8130 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8135 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8137 FEAT_CTL_LMCE_ENABLED;
8140 ~FEAT_CTL_LMCE_ENABLED;
8143 #ifdef CONFIG_KVM_SMM
8144 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8152 static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8173 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8194 static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8210 if (hrtimer_try_to_cancel(timer) == 1)
8211 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8217 kvm_set_posted_intr_wakeup_handler(NULL);
8225 #define VMX_REQUIRED_APICV_INHIBITS \
8227 BIT(APICV_INHIBIT_REASON_DISABLE)| \
8228 BIT(APICV_INHIBIT_REASON_ABSENT) | \
8229 BIT(APICV_INHIBIT_REASON_HYPERV) | \
8230 BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
8231 BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
8232 BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
8233 BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) \
8253 unsigned long cr3_bits;
8265 if (!(gva & BIT_ULL(63))) {
8267 if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
8271 lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
8284 return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
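/*
 * LAM untagging, as done above: the address is sign-extended from the
 * metadata bit selected by CR3.LAM_U57/LAM_U48 (or the supervisor LAM
 * controls for kernel addresses), while bit 63 is preserved so a user
 * access cannot be turned into a supervisor access or vice versa.
 */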
8287 static struct kvm_x86_ops vmx_x86_ops __initdata = {
8288 .name = KBUILD_MODNAME,
8298 .vm_size = sizeof(struct kvm_vmx),
8405 #ifdef CONFIG_X86_64
8406 .set_hv_timer = vmx_set_hv_timer,
8407 .cancel_hv_timer = vmx_cancel_hv_timer,
8412 #ifdef CONFIG_KVM_SMM
8413 .smi_allowed = vmx_smi_allowed,
8414 .enter_smm = vmx_enter_smm,
8415 .leave_smm = vmx_leave_smm,
8416 .enable_smi_window = vmx_enable_smi_window,
8439 kvm_make_request(KVM_REQ_PMI, vcpu);
8440 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
8441 (unsigned long *)&vcpu->arch.pmu.global_status);
8456 const u32 vmx_uret_msrs_list[] = {
8457 #ifdef CONFIG_X86_64
8458 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
8460 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
8467 for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
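/*
 * Each MSR in the list above is registered with the common user-return MSR
 * machinery; host values are restored lazily when the CPU returns to
 * userspace rather than on every VM-Exit.
 */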
8486 me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
8496 static struct kvm_x86_init_ops vmx_init_ops __initdata;
8500 unsigned long host_bndcfgs;
8513 pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
8514 "does not work properly. Using workaround\n");
8516 if (boot_cpu_has(X86_FEATURE_NX))
8519 if (boot_cpu_has(X86_FEATURE_MPX)) {
8520 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
8521 WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
8526 XFEATURE_MASK_BNDCSR);
8539 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
8540 pr_err_ratelimited("NX (Execute Disable) not supported\n");
8556 #ifdef CONFIG_X86_SGX_KVM
8567 vmx_x86_ops.set_apic_access_page_addr = NULL;
8570 vmx_x86_ops.update_cr8_intercept = NULL;
8572 #if IS_ENABLED(CONFIG_HYPERV)
8573 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
8591 vmx_x86_ops.sync_pir_to_irr = NULL;
8604 set_bit(0, vmx_vpid_bitmap);
8627 vmx_x86_ops.cpu_dirty_log_size = 0;
8633 u64 use_timer_freq = 5000ULL * 1000 * 1000;
8639 use_timer_freq = (u64)tsc_khz * 1000;
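/*
 * use_timer_freq approximates the rate at which the VMX preemption timer
 * counts (a fixed fraction of the TSC frequency).  The check below disables
 * the timer if its maximum 32-bit count would expire in under ~10 seconds,
 * to avoid spurious exits.
 */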
8647 if (use_timer_freq > 0xffffffffu / 10)
8652 vmx_x86_ops.set_hv_timer = NULL;
8653 vmx_x86_ops.cancel_hv_timer = NULL;
8667 vmx_init_ops.handle_intel_pt_intr = NULL;
8690 static struct kvm_x86_init_ops vmx_init_ops __initdata = {
8692 .handle_intel_pt_intr = NULL,
8694 .runtime_ops = &vmx_x86_ops,
8705 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
8754 for_each_possible_cpu(cpu) {
8755 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
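/*
 * Each physical CPU keeps a list of the VMCSs currently loaded on it so
 * they can be cleared when the CPU is offlined or when VMX must be disabled
 * in an emergency (e.g. kexec/reboot).
 */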