#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/intel_pt.h>
#include <asm/perf_event.h>

#include "../kvm_cache_regs.h"

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
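/*
 * Example: each x2APIC MSR maps a 16-byte APIC MMIO offset, so with
 * APIC_BASE_MSR == 0x800 and APIC_ICR == 0x300, X2APIC_MSR(APIC_ICR)
 * evaluates to 0x800 + (0x300 >> 4) == 0x830, the x2APIC ICR MSR.
 */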
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif
#define MAX_NR_LOADSTORE_MSRS	8

#define RTIT_ADDR_RANGE		4
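/*
 * RTIT_ADDR_RANGE: the number of Intel PT address-range filter pairs
 * (IA32_RTIT_ADDRn_A/IA32_RTIT_ADDRn_B) tracked per PT context.
 */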
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
#ifdef CONFIG_KVM_HYPERV
	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
#endif
	struct kvm_vcpu vcpu;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	struct kvm_segment segs[8];
	struct kvm_save_segment {
		u16 selector;
		unsigned long base;
		u32 limit;
		u32 ar;
	} seg[8];
#define PML_ENTITY_NUM		512

#define MAX_POSSIBLE_PASSTHROUGH_MSRS	16
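/*
 * MAX_POSSIBLE_PASSTHROUGH_MSRS sizes the per-vCPU shadow-intercept
 * bookkeeping; in the full source it must cover the
 * vmx_possible_passthrough_msrs[] table defined in vmx.c.
 */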
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)    \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
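/*
 * Instantiations as in the full header; these expand to the
 * vmx_{test,clear,set}_msr_bitmap_{read,write}() helpers covering the
 * 0x0-0x1fff and 0xc0000000-0xc0001fff MSR ranges of a VMX MSR bitmap.
 */
BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)

/*
 * The VMCS controls below are split into REQUIRED bits, without which KVM
 * refuses to load, and OPTIONAL bits, which KVM enables only when the CPU
 * supports them.
 */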
#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |			\
		 VM_ENTRY_IA32E_MODE)
#else
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_ENTRY_LOAD_IA32_PAT |					\
	 VM_ENTRY_LOAD_IA32_EFER |					\
	 VM_ENTRY_LOAD_BNDCFGS |					\
	 VM_ENTRY_PT_CONCEAL_PIP |					\
	 VM_ENTRY_LOAD_IA32_RTIT_CTL)
#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |					\
	 VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |			\
		 VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_IA32_PAT |					\
	 VM_EXIT_LOAD_IA32_PAT |					\
	 VM_EXIT_SAVE_IA32_EFER |					\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |				\
	 VM_EXIT_LOAD_IA32_EFER |					\
	 VM_EXIT_CLEAR_BNDCFGS |					\
	 VM_EXIT_PT_CONCEAL_PIP |					\
	 VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_EXT_INTR_MASK |					\
	 PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_VIRTUAL_NMIS |					\
	 PIN_BASED_POSTED_INTR |					\
	 PIN_BASED_VMX_PREEMPTION_TIMER)
#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_HLT_EXITING |					\
	 CPU_BASED_CR3_LOAD_EXITING |					\
	 CPU_BASED_CR3_STORE_EXITING |					\
	 CPU_BASED_UNCOND_IO_EXITING |					\
	 CPU_BASED_MOV_DR_EXITING |					\
	 CPU_BASED_USE_TSC_OFFSETTING |					\
	 CPU_BASED_MWAIT_EXITING |					\
	 CPU_BASED_MONITOR_EXITING |					\
	 CPU_BASED_INVLPG_EXITING |					\
	 CPU_BASED_RDPMC_EXITING |					\
	 CPU_BASED_INTR_WINDOW_EXITING)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		(__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |		\
		 CPU_BASED_CR8_LOAD_EXITING |				\
		 CPU_BASED_CR8_STORE_EXITING)
#else
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif
#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_RDTSC_EXITING |					\
	 CPU_BASED_TPR_SHADOW |						\
	 CPU_BASED_USE_IO_BITMAPS |					\
	 CPU_BASED_MONITOR_TRAP_FLAG |					\
	 CPU_BASED_USE_MSR_BITMAPS |					\
	 CPU_BASED_NMI_WINDOW_EXITING |					\
	 CPU_BASED_PAUSE_EXITING |					\
	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |			\
	 CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL			\
	(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |			\
	 SECONDARY_EXEC_WBINVD_EXITING |				\
	 SECONDARY_EXEC_ENABLE_VPID |					\
	 SECONDARY_EXEC_ENABLE_EPT |					\
	 SECONDARY_EXEC_UNRESTRICTED_GUEST |				\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING |				\
	 SECONDARY_EXEC_DESC |						\
	 SECONDARY_EXEC_ENABLE_RDTSCP |					\
	 SECONDARY_EXEC_ENABLE_INVPCID |				\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_ENABLE_XSAVES |					\
	 SECONDARY_EXEC_RDSEED_EXITING |				\
	 SECONDARY_EXEC_RDRAND_EXITING |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |				\
	 SECONDARY_EXEC_PT_USE_GPA |					\
	 SECONDARY_EXEC_PT_CONCEAL_VMX |				\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
	 SECONDARY_EXEC_ENCLS_EXITING)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
	(TERTIARY_EXEC_IPI_VIRT)
#define BUILD_CONTROLS_SHADOW(lname, uname, bits)				\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)	\
{										\
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {			\
		vmcs_write##bits(uname, val);					\
		vmx->loaded_vmcs->controls_shadow.lname = val;			\
	}									\
}										\
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)	\
{										\
	return vmcs->controls_shadow.lname;					\
}										\
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)		\
{										\
	return __##lname##_controls_get(vmx->loaded_vmcs);			\
}										\
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)		\
{										\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));		\
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);		\
}										\
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val)	\
{										\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));		\
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);		\
}
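/*
 * Instantiations as in the full header; e.g. the first line generates
 * vm_entry_controls_set()/_get()/_setbit()/_clearbit(), all backed by the
 * controls_shadow cache in struct loaded_vmcs so that unchanged values
 * never hit the VMCS.
 */
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)

/*
 * VMX_REGS_LAZY_LOAD_SET - the set of registers that are read out of the
 * VMCS and into the register cache only on demand; registers not listed
 * here are synced immediately after VM-Exit.
 */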
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				 (1 << VCPU_REGS_RSP) |		\
				 (1 << VCPU_EXREG_RFLAGS) |	\
				 (1 << VCPU_EXREG_PDPTR) |	\
				 (1 << VCPU_EXREG_SEGMENTS) |	\
				 (1 << VCPU_EXREG_CR0) |	\
				 (1 << VCPU_EXREG_CR3) |	\
				 (1 << VCPU_EXREG_CR4) |	\
				 (1 << VCPU_EXREG_EXIT_INFO_1) | \
				 (1 << VCPU_EXREG_EXIT_INFO_2))
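/*
 * A consumer of the lazy set, as in the full header: the exit qualification
 * is read from the VMCS only on first use after a VM-Exit, then served from
 * the cached copy.
 */
static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	return vmx->exit_qualification;
}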
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}
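/*
 * Bits 31:28 of the VM-exit instruction-information field encode the second
 * register operand, e.g. the register operand of a nested INVVPID.
 */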
static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}
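#endif /* __KVM_X86_VMX_H */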