KVM
Classes | Macros | Enumerations | Functions
vmx.h File Reference
#include <linux/kvm_host.h>
#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"

Classes

struct  vmx_msrs
 
struct  vmx_uret_msr
 
struct  pt_ctx
 
struct  pt_desc
 
union  vmx_exit_reason
 
struct  lbr_desc
 
struct  nested_vmx
 
struct  vcpu_vmx
 
struct  vcpu_vmx::msr_autoload
 
struct  vcpu_vmx::msr_autostore
 
struct  kvm_vmx
 

Macros

#define MSR_TYPE_R   1
 
#define MSR_TYPE_W   2
 
#define MSR_TYPE_RW   3
 
#define X2APIC_MSR(r)   (APIC_BASE_MSR + ((r) >> 4))
 
#define MAX_NR_USER_RETURN_MSRS   4
 
#define MAX_NR_LOADSTORE_MSRS   8
 
#define RTIT_ADDR_RANGE   4
 
#define PML_ENTITY_NUM   512
 
#define MAX_POSSIBLE_PASSTHROUGH_MSRS   16
 
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)
 
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)
 
#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS    (VM_ENTRY_LOAD_DEBUG_CONTROLS)
 
#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS    __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
 
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS
 
#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
 
#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS    __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
 
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS
 
#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL
 
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL
 
#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
 
#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL    __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
 
#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL
 
#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL   0
 
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL
 
#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL   0
 
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL    (TERTIARY_EXEC_IPI_VIRT)
 
#define BUILD_CONTROLS_SHADOW(lname, uname, bits)
 
#define VMX_REGS_LAZY_LOAD_SET
 

Enumerations

enum  segment_cache_field {
  SEG_FIELD_SEL = 0 , SEG_FIELD_BASE = 1 , SEG_FIELD_LIMIT = 2 , SEG_FIELD_AR = 3 ,
  SEG_FIELD_NR = 4
}
 

Functions

void vmx_vcpu_load_vmcs (struct kvm_vcpu *vcpu, int cpu, struct loaded_vmcs *buddy)
 
int allocate_vpid (void)
 
void free_vpid (int vpid)
 
void vmx_set_constant_host_state (struct vcpu_vmx *vmx)
 
void vmx_prepare_switch_to_guest (struct kvm_vcpu *vcpu)
 
void vmx_set_host_fs_gs (struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, unsigned long fs_base, unsigned long gs_base)
 
int vmx_get_cpl (struct kvm_vcpu *vcpu)
 
bool vmx_emulation_required (struct kvm_vcpu *vcpu)
 
unsigned long vmx_get_rflags (struct kvm_vcpu *vcpu)
 
void vmx_set_rflags (struct kvm_vcpu *vcpu, unsigned long rflags)
 
u32 vmx_get_interrupt_shadow (struct kvm_vcpu *vcpu)
 
void vmx_set_interrupt_shadow (struct kvm_vcpu *vcpu, int mask)
 
int vmx_set_efer (struct kvm_vcpu *vcpu, u64 efer)
 
void vmx_set_cr0 (struct kvm_vcpu *vcpu, unsigned long cr0)
 
void vmx_set_cr4 (struct kvm_vcpu *vcpu, unsigned long cr4)
 
void set_cr4_guest_host_mask (struct vcpu_vmx *vmx)
 
void ept_save_pdptrs (struct kvm_vcpu *vcpu)
 
void vmx_get_segment (struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 
void __vmx_set_segment (struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 
u64 construct_eptp (struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
 
bool vmx_guest_inject_ac (struct kvm_vcpu *vcpu)
 
void vmx_update_exception_bitmap (struct kvm_vcpu *vcpu)
 
bool vmx_nmi_blocked (struct kvm_vcpu *vcpu)
 
bool vmx_interrupt_blocked (struct kvm_vcpu *vcpu)
 
bool vmx_get_nmi_mask (struct kvm_vcpu *vcpu)
 
void vmx_set_nmi_mask (struct kvm_vcpu *vcpu, bool masked)
 
void vmx_set_virtual_apic_mode (struct kvm_vcpu *vcpu)
 
struct vmx_uret_msr * vmx_find_uret_msr (struct vcpu_vmx *vmx, u32 msr)
 
void pt_update_intercept_for_msr (struct kvm_vcpu *vcpu)
 
void vmx_update_host_rsp (struct vcpu_vmx *vmx, unsigned long host_rsp)
 
void vmx_spec_ctrl_restore_host (struct vcpu_vmx *vmx, unsigned int flags)
 
unsigned int __vmx_vcpu_run_flags (struct vcpu_vmx *vmx)
 
bool __vmx_vcpu_run (struct vcpu_vmx *vmx, unsigned long *regs, unsigned int flags)
 
int vmx_find_loadstore_msr_slot (struct vmx_msrs *m, u32 msr)
 
void vmx_ept_load_pdptrs (struct kvm_vcpu *vcpu)
 
void vmx_disable_intercept_for_msr (struct kvm_vcpu *vcpu, u32 msr, int type)
 
void vmx_enable_intercept_for_msr (struct kvm_vcpu *vcpu, u32 msr, int type)
 
u64 vmx_get_l2_tsc_offset (struct kvm_vcpu *vcpu)
 
u64 vmx_get_l2_tsc_multiplier (struct kvm_vcpu *vcpu)
 
gva_t vmx_get_untagged_addr (struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
 
static void vmx_set_intercept_for_msr (struct kvm_vcpu *vcpu, u32 msr, int type, bool value)
 
void vmx_update_cpu_dirty_logging (struct kvm_vcpu *vcpu)
 
static u8 vmx_get_rvi (void)
 
static unsigned long vmx_l1_guest_owned_cr0_bits (void)
 
static __always_inline struct kvm_vmx * to_kvm_vmx (struct kvm *kvm)
 
static __always_inline struct vcpu_vmx * to_vmx (struct kvm_vcpu *vcpu)
 
static struct lbr_desc * vcpu_to_lbr_desc (struct kvm_vcpu *vcpu)
 
static struct x86_pmu_lbr * vcpu_to_lbr_records (struct kvm_vcpu *vcpu)
 
static bool intel_pmu_lbr_is_enabled (struct kvm_vcpu *vcpu)
 
void intel_pmu_cross_mapped_check (struct kvm_pmu *pmu)
 
int intel_pmu_create_guest_lbr_event (struct kvm_vcpu *vcpu)
 
void vmx_passthrough_lbr_msrs (struct kvm_vcpu *vcpu)
 
static __always_inline unsigned long vmx_get_exit_qual (struct kvm_vcpu *vcpu)
 
static __always_inline u32 vmx_get_intr_info (struct kvm_vcpu *vcpu)
 
struct vmcs * alloc_vmcs_cpu (bool shadow, int cpu, gfp_t flags)
 
void free_vmcs (struct vmcs *vmcs)
 
int alloc_loaded_vmcs (struct loaded_vmcs *loaded_vmcs)
 
void free_loaded_vmcs (struct loaded_vmcs *loaded_vmcs)
 
void loaded_vmcs_clear (struct loaded_vmcs *loaded_vmcs)
 
static struct vmcs * alloc_vmcs (bool shadow)
 
static bool vmx_has_waitpkg (struct vcpu_vmx *vmx)
 
static bool vmx_need_pf_intercept (struct kvm_vcpu *vcpu)
 
static bool is_unrestricted_guest (struct kvm_vcpu *vcpu)
 
bool __vmx_guest_state_valid (struct kvm_vcpu *vcpu)
 
static bool vmx_guest_state_valid (struct kvm_vcpu *vcpu)
 
void dump_vmcs (struct kvm_vcpu *vcpu)
 
static int vmx_get_instr_info_reg2 (u32 vmx_instr_info)
 
static bool vmx_can_use_ipiv (struct kvm_vcpu *vcpu)
 

Macro Definition Documentation

◆ __BUILD_VMX_MSR_BITMAP_HELPER

#define __BUILD_VMX_MSR_BITMAP_HELPER (   rtype,
  action,
  bitop,
  access,
  base 
)
Value:
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
u32 msr) \
{ \
int f = sizeof(unsigned long); \
\
if (msr <= 0x1fff) \
return bitop##_bit(msr, bitmap + base / f); \
else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
return (rtype)true; \
}

Definition at line 446 of file vmx.h.

◆ __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
Value:
(CPU_BASED_HLT_EXITING | \
CPU_BASED_CR3_LOAD_EXITING | \
CPU_BASED_CR3_STORE_EXITING | \
CPU_BASED_UNCOND_IO_EXITING | \
CPU_BASED_MOV_DR_EXITING | \
CPU_BASED_USE_TSC_OFFSETTING | \
CPU_BASED_MWAIT_EXITING | \
CPU_BASED_MONITOR_EXITING | \
CPU_BASED_INVLPG_EXITING | \
CPU_BASED_RDPMC_EXITING | \
CPU_BASED_INTR_WINDOW_EXITING)

Definition at line 519 of file vmx.h.

◆ __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS    (VM_ENTRY_LOAD_DEBUG_CONTROLS)

Definition at line 471 of file vmx.h.

◆ __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
Value:
(VM_EXIT_SAVE_DEBUG_CONTROLS | \
VM_EXIT_ACK_INTR_ON_EXIT)

Definition at line 489 of file vmx.h.

◆ BUILD_CONTROLS_SHADOW

#define BUILD_CONTROLS_SHADOW (   lname,
  uname,
  bits 
)
Value:
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
{ \
if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
vmcs_write##bits(uname, val); \
vmx->loaded_vmcs->controls_shadow.lname = val; \
} \
} \
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
return vmcs->controls_shadow.lname; \
} \
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{ \
BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}

Definition at line 585 of file vmx.h.
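
For illustration, a minimal sketch of how this generator is used (the instantiation below is an assumption about vmx.h, not quoted from this page; the generated accessor naming is, however, what secondary_exec_controls_get() in vmx_has_waitpkg() and is_unrestricted_guest() below refers to). An invocation such as

BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)

generates vm_entry_controls_set(), vm_entry_controls_get(), vm_entry_controls_setbit() and vm_entry_controls_clearbit(), letting callers flip a single control bit while skipping the VMWRITE whenever the cached shadow already holds the desired value:

vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_LOAD_DEBUG_CONTROLS);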

◆ BUILD_VMX_MSR_BITMAP_HELPERS

#define BUILD_VMX_MSR_BITMAP_HELPERS (   ret_type,
  action,
  bitop 
)
Value:
__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

Definition at line 458 of file vmx.h.
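
A hedged sketch of the resulting helpers (this particular instantiation is an assumption; the generated names do appear in vmx_enable_intercept_for_msr() and vmx_disable_intercept_for_msr() below):

BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)

expands, via __BUILD_VMX_MSR_BITMAP_HELPER, into

static inline void vmx_clear_msr_bitmap_read(unsigned long *bitmap, u32 msr);
static inline void vmx_clear_msr_bitmap_write(unsigned long *bitmap, u32 msr);

which clear the read-intercept bit (bitmap offset 0x0) or the write-intercept bit (offset 0x800) for MSRs 0x0-0x1fff, and the mirrored ranges at +0x400 for MSRs 0xc0000000-0xc0001fff.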

◆ KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL
Value:
(CPU_BASED_RDTSC_EXITING | \
CPU_BASED_TPR_SHADOW | \
CPU_BASED_USE_IO_BITMAPS | \
CPU_BASED_MONITOR_TRAP_FLAG | \
CPU_BASED_USE_MSR_BITMAPS | \
CPU_BASED_NMI_WINDOW_EXITING | \
CPU_BASED_PAUSE_EXITING | \
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS | \
CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

Definition at line 542 of file vmx.h.

◆ KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL

#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL
Value:
(PIN_BASED_VIRTUAL_NMIS | \
PIN_BASED_POSTED_INTR | \
PIN_BASED_VMX_PREEMPTION_TIMER)

Definition at line 514 of file vmx.h.

◆ KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL

#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL
Value:
(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
SECONDARY_EXEC_WBINVD_EXITING | \
SECONDARY_EXEC_ENABLE_VPID | \
SECONDARY_EXEC_ENABLE_EPT | \
SECONDARY_EXEC_UNRESTRICTED_GUEST | \
SECONDARY_EXEC_PAUSE_LOOP_EXITING | \
SECONDARY_EXEC_DESC | \
SECONDARY_EXEC_ENABLE_RDTSCP | \
SECONDARY_EXEC_ENABLE_INVPCID | \
SECONDARY_EXEC_APIC_REGISTER_VIRT | \
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
SECONDARY_EXEC_SHADOW_VMCS | \
SECONDARY_EXEC_ENABLE_XSAVES | \
SECONDARY_EXEC_RDSEED_EXITING | \
SECONDARY_EXEC_RDRAND_EXITING | \
SECONDARY_EXEC_ENABLE_PML | \
SECONDARY_EXEC_TSC_SCALING | \
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
SECONDARY_EXEC_PT_USE_GPA | \
SECONDARY_EXEC_PT_CONCEAL_VMX | \
SECONDARY_EXEC_ENABLE_VMFUNC | \
SECONDARY_EXEC_BUS_LOCK_DETECTION | \
SECONDARY_EXEC_NOTIFY_VM_EXITING | \
SECONDARY_EXEC_ENCLS_EXITING)

Definition at line 554 of file vmx.h.

◆ KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL

#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL    (TERTIARY_EXEC_IPI_VIRT)

Definition at line 582 of file vmx.h.

◆ KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS

#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS
Value:
(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
VM_ENTRY_LOAD_IA32_PAT | \
VM_ENTRY_LOAD_IA32_EFER | \
VM_ENTRY_LOAD_BNDCFGS | \
VM_ENTRY_PT_CONCEAL_PIP | \
VM_ENTRY_LOAD_IA32_RTIT_CTL)

Definition at line 481 of file vmx.h.

◆ KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS

#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS
Value:
(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
VM_EXIT_SAVE_IA32_PAT | \
VM_EXIT_LOAD_IA32_PAT | \
VM_EXIT_SAVE_IA32_EFER | \
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | \
VM_EXIT_LOAD_IA32_EFER | \
VM_EXIT_CLEAR_BNDCFGS | \
VM_EXIT_PT_CONCEAL_PIP | \
VM_EXIT_CLEAR_IA32_RTIT_CTL)

Definition at line 500 of file vmx.h.

◆ KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL

#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL    __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL

Definition at line 538 of file vmx.h.

◆ KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL
Value:
(PIN_BASED_EXT_INTR_MASK | \
PIN_BASED_NMI_EXITING)

Definition at line 511 of file vmx.h.

◆ KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL   0

Definition at line 553 of file vmx.h.

◆ KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL   0

Definition at line 581 of file vmx.h.

◆ KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS

#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS    __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS

Definition at line 478 of file vmx.h.

◆ KVM_REQUIRED_VMX_VM_EXIT_CONTROLS

#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS    __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS

Definition at line 497 of file vmx.h.

◆ MAX_NR_LOADSTORE_MSRS

#define MAX_NR_LOADSTORE_MSRS   8

Definition at line 31 of file vmx.h.

◆ MAX_NR_USER_RETURN_MSRS

#define MAX_NR_USER_RETURN_MSRS   4

Definition at line 28 of file vmx.h.

◆ MAX_POSSIBLE_PASSTHROUGH_MSRS

#define MAX_POSSIBLE_PASSTHROUGH_MSRS   16

Definition at line 362 of file vmx.h.

◆ MSR_TYPE_R

#define MSR_TYPE_R   1

Definition at line 19 of file vmx.h.

◆ MSR_TYPE_RW

#define MSR_TYPE_RW   3

Definition at line 21 of file vmx.h.

◆ MSR_TYPE_W

#define MSR_TYPE_W   2

Definition at line 20 of file vmx.h.

◆ PML_ENTITY_NUM

#define PML_ENTITY_NUM   512

Definition at line 338 of file vmx.h.

◆ RTIT_ADDR_RANGE

#define RTIT_ADDR_RANGE   4

Definition at line 53 of file vmx.h.

◆ VMX_REGS_LAZY_LOAD_SET

#define VMX_REGS_LAZY_LOAD_SET
Value:
((1 << VCPU_REGS_RIP) | \
(1 << VCPU_REGS_RSP) | \
(1 << VCPU_EXREG_RFLAGS) | \
(1 << VCPU_EXREG_PDPTR) | \
(1 << VCPU_EXREG_SEGMENTS) | \
(1 << VCPU_EXREG_CR0) | \
(1 << VCPU_EXREG_CR3) | \
(1 << VCPU_EXREG_CR4) | \
(1 << VCPU_EXREG_EXIT_INFO_1) | \
(1 << VCPU_EXREG_EXIT_INFO_2))

Definition at line 623 of file vmx.h.

◆ X2APIC_MSR

#define X2APIC_MSR (   r)    (APIC_BASE_MSR + ((r) >> 4))

Definition at line 23 of file vmx.h.
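
A worked example, assuming the usual APIC_BASE_MSR value of 0x800 from <asm/apicdef.h>: the task-priority register lives at offset 0x80 in the APIC MMIO page, so

X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808

which is the x2APIC TPR MSR; the >> 4 converts a 16-byte-aligned MMIO register offset into its x2APIC MSR index.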

Enumeration Type Documentation

◆ segment_cache_field

Enumerator
SEG_FIELD_SEL 
SEG_FIELD_BASE 
SEG_FIELD_LIMIT 
SEG_FIELD_AR 
SEG_FIELD_NR 

Definition at line 44 of file vmx.h.

44  {
45  SEG_FIELD_SEL = 0,
46  SEG_FIELD_BASE = 1,
47  SEG_FIELD_LIMIT = 2,
48  SEG_FIELD_AR = 3,
49 
50  SEG_FIELD_NR = 4
51 };
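
These values index KVM's per-vCPU segment register cache. A minimal sketch of the bit layout the enum implies, with one validity bit per (segment, field) pair (the helper below is hypothetical; the real cache accessors live in vmx.c alongside vmx_read_guest_seg_ar()):

static bool segment_cache_valid(u32 bitmask, unsigned int seg, unsigned int field)
{
        return bitmask & (1u << (seg * SEG_FIELD_NR + field));
}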

Function Documentation

◆ __vmx_guest_state_valid()

bool __vmx_guest_state_valid ( struct kvm_vcpu *  vcpu)

Definition at line 3796 of file vmx.c.

3797 {
3798  /* real mode guest state checks */
3799  if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3800  if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3801  return false;
3802  if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3803  return false;
3804  if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3805  return false;
3806  if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3807  return false;
3808  if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3809  return false;
3810  if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3811  return false;
3812  } else {
3813  /* protected mode guest state checks */
3814  if (!cs_ss_rpl_check(vcpu))
3815  return false;
3816  if (!code_segment_valid(vcpu))
3817  return false;
3818  if (!stack_segment_valid(vcpu))
3819  return false;
3820  if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3821  return false;
3822  if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3823  return false;
3824  if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3825  return false;
3826  if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3827  return false;
3828  if (!tr_valid(vcpu))
3829  return false;
3830  if (!ldtr_valid(vcpu))
3831  return false;
3832  }
3833  /* TODO:
3834  * - Add checks on RIP
3835  * - Add checks on RFLAGS
3836  */
3837 
3838  return true;
3839 }

◆ __vmx_set_segment()

void __vmx_set_segment ( struct kvm_vcpu *  vcpu,
struct kvm_segment *  var,
int  seg 
)

Definition at line 3572 of file vmx.c.

3573 {
3574  struct vcpu_vmx *vmx = to_vmx(vcpu);
3575  const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3576 
3577  vmx_segment_cache_clear(vmx);
3578 
3579  if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3580  vmx->rmode.segs[seg] = *var;
3581  if (seg == VCPU_SREG_TR)
3582  vmcs_write16(sf->selector, var->selector);
3583  else if (var->s)
3584  fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3585  return;
3586  }
3587 
3588  vmcs_writel(sf->base, var->base);
3589  vmcs_write32(sf->limit, var->limit);
3590  vmcs_write16(sf->selector, var->selector);
3591 
3592  /*
3593  * Fix the "Accessed" bit in AR field of segment registers for older
3594  * qemu binaries.
3595  * IA32 arch specifies that at the time of processor reset the
3596  * "Accessed" bit in the AR field of segment registers is 1. And qemu
3597  * is setting it to 0 in the userland code. This causes invalid guest
3598  * state vmexit when "unrestricted guest" mode is turned on.
3599  * Fix for this setup issue in cpu_reset is being pushed in the qemu
3600  * tree. Newer qemu binaries with that qemu fix would not need this
3601  * kvm hack.
3602  */
3603  if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3604  var->type |= 0x1; /* Accessed */
3605 
3606  vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3607 }

◆ __vmx_vcpu_run()

bool __vmx_vcpu_run ( struct vcpu_vmx *  vmx,
unsigned long *  regs,
unsigned int  flags 
)

◆ __vmx_vcpu_run_flags()

unsigned int __vmx_vcpu_run_flags ( struct vcpu_vmx *  vmx)

Definition at line 944 of file vmx.c.

945 {
946  unsigned int flags = 0;
947 
948  if (vmx->loaded_vmcs->launched)
949  flags |= VMX_RUN_VMRESUME;
950 
951  /*
952  * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
953  * to change it directly without causing a vmexit. In that case read
954  * it after vmexit and store it in vmx->spec_ctrl.
955  */
956  if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
957  flags |= VMX_RUN_SAVE_SPEC_CTRL;
958 
959  return flags;
960 }
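
A hedged sketch of how these flags reach the assembly entry helper (paraphrasing the vmx_vcpu_enter_exit() call site rather than quoting it):

unsigned int flags = __vmx_vcpu_run_flags(vmx);
vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, flags);

VMX_RUN_VMRESUME selects VMRESUME over VMLAUNCH for an already-launched VMCS, and VMX_RUN_SAVE_SPEC_CTRL asks the exit path to read MSR_IA32_SPEC_CTRL back into vmx->spec_ctrl.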

◆ alloc_loaded_vmcs()

int alloc_loaded_vmcs ( struct loaded_vmcs *  loaded_vmcs)

Definition at line 2905 of file vmx.c.

2906 {
2907  loaded_vmcs->vmcs = alloc_vmcs(false);
2908  if (!loaded_vmcs->vmcs)
2909  return -ENOMEM;
2910 
2911  vmcs_clear(loaded_vmcs->vmcs);
2912 
2913  loaded_vmcs->shadow_vmcs = NULL;
2914  loaded_vmcs->hv_timer_soft_disabled = false;
2915  loaded_vmcs->cpu = -1;
2916  loaded_vmcs->launched = 0;
2917 
2918  if (cpu_has_vmx_msr_bitmap()) {
2919  loaded_vmcs->msr_bitmap = (unsigned long *)
2920  __get_free_page(GFP_KERNEL_ACCOUNT);
2921  if (!loaded_vmcs->msr_bitmap)
2922  goto out_vmcs;
2923  memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2924  }
2925 
2926  memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2927  memset(&loaded_vmcs->controls_shadow, 0,
2928  sizeof(struct vmcs_controls_shadow));
2929 
2930  return 0;
2931 
2932 out_vmcs:
2933  free_loaded_vmcs(loaded_vmcs);
2934  return -ENOMEM;
2935 }

◆ alloc_vmcs()

static struct vmcs* alloc_vmcs ( bool  shadow)
inline, static

Definition at line 707 of file vmx.h.

708 {
709  return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
710  GFP_KERNEL_ACCOUNT);
711 }

◆ alloc_vmcs_cpu()

struct vmcs* alloc_vmcs_cpu ( bool  shadow,
int  cpu,
gfp_t  flags 
)

Definition at line 2862 of file vmx.c.

2863 {
2864  int node = cpu_to_node(cpu);
2865  struct page *pages;
2866  struct vmcs *vmcs;
2867 
2868  pages = __alloc_pages_node(node, flags, 0);
2869  if (!pages)
2870  return NULL;
2871  vmcs = page_address(pages);
2872  memset(vmcs, 0, vmcs_config.size);
2873 
2874  /* KVM supports Enlightened VMCS v1 only */
2875  if (kvm_is_using_evmcs())
2877  else
2879 
2880  if (shadow)
2881  vmcs->hdr.shadow_vmcs = 1;
2882  return vmcs;
2883 }

◆ allocate_vpid()

int allocate_vpid ( void  )

Definition at line 3919 of file vmx.c.

3920 {
3921  int vpid;
3922 
3923  if (!enable_vpid)
3924  return 0;
3925  spin_lock(&vmx_vpid_lock);
3926  vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3927  if (vpid < VMX_NR_VPIDS)
3928  __set_bit(vpid, vmx_vpid_bitmap);
3929  else
3930  vpid = 0;
3931  spin_unlock(&vmx_vpid_lock);
3932  return vpid;
3933 }

◆ construct_eptp()

u64 construct_eptp ( struct kvm_vcpu *  vcpu,
hpa_t  root_hpa,
int  root_level 
)

Definition at line 3371 of file vmx.c.

3372 {
3373  u64 eptp = VMX_EPTP_MT_WB;
3374 
3375  eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3376 
3377  if (enable_ept_ad_bits &&
3378  (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3379  eptp |= VMX_EPTP_AD_ENABLE_BIT;
3380  eptp |= root_hpa;
3381 
3382  return eptp;
3383 }
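
A worked example, assuming the VMX_EPTP_* encodings from <asm/vmx.h> (memory type WB = 6, page-walk length field = (levels - 1) << 3, A/D-enable = bit 6): for a 4-level EPT with A/D bits enabled and root_hpa = 0x12345000,

eptp = 0x6 | 0x18 | 0x40 | 0x12345000 = 0x1234505e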

◆ dump_vmcs()

void dump_vmcs ( struct kvm_vcpu *  vcpu)

Definition at line 6232 of file vmx.c.

6233 {
6234  struct vcpu_vmx *vmx = to_vmx(vcpu);
6235  u32 vmentry_ctl, vmexit_ctl;
6236  u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6237  u64 tertiary_exec_control;
6238  unsigned long cr4;
6239  int efer_slot;
6240 
6241  if (!dump_invalid_vmcs) {
6242  pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6243  return;
6244  }
6245 
6246  vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6247  vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6248  cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6249  pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6250  cr4 = vmcs_readl(GUEST_CR4);
6251 
6252  if (cpu_has_secondary_exec_ctrls())
6253  secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6254  else
6255  secondary_exec_control = 0;
6256 
6257  if (cpu_has_tertiary_exec_ctrls())
6258  tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6259  else
6260  tertiary_exec_control = 0;
6261 
6262  pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6263  vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6264  pr_err("*** Guest State ***\n");
6265  pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6266  vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6267  vmcs_readl(CR0_GUEST_HOST_MASK));
6268  pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6269  cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6270  pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6271  if (cpu_has_vmx_ept()) {
6272  pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
6273  vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6274  pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
6275  vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6276  }
6277  pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
6278  vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6279  pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
6280  vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6281  pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6282  vmcs_readl(GUEST_SYSENTER_ESP),
6283  vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6284  vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
6285  vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
6286  vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
6287  vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
6288  vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
6289  vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
6290  vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6291  vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6292  vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6293  vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
6294  efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6295  if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6296  pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6297  else if (efer_slot >= 0)
6298  pr_err("EFER= 0x%016llx (autoload)\n",
6299  vmx->msr_autoload.guest.val[efer_slot].value);
6300  else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6301  pr_err("EFER= 0x%016llx (effective)\n",
6302  vcpu->arch.efer | (EFER_LMA | EFER_LME));
6303  else
6304  pr_err("EFER= 0x%016llx (effective)\n",
6305  vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6306  if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6307  pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6308  pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
6309  vmcs_read64(GUEST_IA32_DEBUGCTL),
6310  vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6311  if (cpu_has_load_perf_global_ctrl() &&
6312  vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6313  pr_err("PerfGlobCtl = 0x%016llx\n",
6314  vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6315  if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6316  pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6317  pr_err("Interruptibility = %08x ActivityState = %08x\n",
6318  vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6319  vmcs_read32(GUEST_ACTIVITY_STATE));
6320  if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6321  pr_err("InterruptStatus = %04x\n",
6322  vmcs_read16(GUEST_INTR_STATUS));
6323  if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6324  vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6325  if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6326  vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6327 
6328  pr_err("*** Host State ***\n");
6329  pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
6330  vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6331  pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6332  vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6333  vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6334  vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6335  vmcs_read16(HOST_TR_SELECTOR));
6336  pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6337  vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6338  vmcs_readl(HOST_TR_BASE));
6339  pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6340  vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6341  pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6342  vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6343  vmcs_readl(HOST_CR4));
6344  pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6345  vmcs_readl(HOST_IA32_SYSENTER_ESP),
6346  vmcs_read32(HOST_IA32_SYSENTER_CS),
6347  vmcs_readl(HOST_IA32_SYSENTER_EIP));
6348  if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6349  pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6350  if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6351  pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6352  if (cpu_has_load_perf_global_ctrl() &&
6353  vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6354  pr_err("PerfGlobCtl = 0x%016llx\n",
6355  vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6356  if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6357  vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6358 
6359  pr_err("*** Control State ***\n");
6360  pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6361  cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6362  pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6363  pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6364  pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6365  vmcs_read32(EXCEPTION_BITMAP),
6366  vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6367  vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6368  pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6369  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6370  vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6371  vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6372  pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6373  vmcs_read32(VM_EXIT_INTR_INFO),
6374  vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6375  vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6376  pr_err(" reason=%08x qualification=%016lx\n",
6377  vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6378  pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6379  vmcs_read32(IDT_VECTORING_INFO_FIELD),
6380  vmcs_read32(IDT_VECTORING_ERROR_CODE));
6381  pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6382  if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6383  pr_err("TSC Multiplier = 0x%016llx\n",
6384  vmcs_read64(TSC_MULTIPLIER));
6385  if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6386  if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6387  u16 status = vmcs_read16(GUEST_INTR_STATUS);
6388  pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6389  }
6390  pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6391  if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6392  pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6393  pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6394  }
6395  if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6396  pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6397  if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6398  pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6399  if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6400  pr_err("PLE Gap=%08x Window=%08x\n",
6401  vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6402  if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6403  pr_err("Virtual processor ID = 0x%04x\n",
6404  vmcs_read16(VIRTUAL_PROCESSOR_ID));
6405 }

◆ ept_save_pdptrs()

void ept_save_pdptrs ( struct kvm_vcpu *  vcpu)

Definition at line 3246 of file vmx.c.

3247 {
3248  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3249 
3250  if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3251  return;
3252 
3253  mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3254  mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3255  mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3256  mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3257 
3258  kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3259 }

◆ free_loaded_vmcs()

void free_loaded_vmcs ( struct loaded_vmcs *  loaded_vmcs)

Definition at line 2893 of file vmx.c.

2894 {
2895  if (!loaded_vmcs->vmcs)
2896  return;
2897  loaded_vmcs_clear(loaded_vmcs);
2898  free_vmcs(loaded_vmcs->vmcs);
2899  loaded_vmcs->vmcs = NULL;
2900  if (loaded_vmcs->msr_bitmap)
2901  free_page((unsigned long)loaded_vmcs->msr_bitmap);
2902  WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2903 }

◆ free_vmcs()

void free_vmcs ( struct vmcs *  vmcs)

Definition at line 2885 of file vmx.c.

2886 {
2887  free_page((unsigned long)vmcs);
2888 }

◆ free_vpid()

void free_vpid ( int  vpid)

Definition at line 3935 of file vmx.c.

3936 {
3937  if (!enable_vpid || vpid == 0)
3938  return;
3939  spin_lock(&vmx_vpid_lock);
3940  __clear_bit(vpid, vmx_vpid_bitmap);
3941  spin_unlock(&vmx_vpid_lock);
3942 }

◆ intel_pmu_create_guest_lbr_event()

int intel_pmu_create_guest_lbr_event ( struct kvm_vcpu *  vcpu)

Definition at line 254 of file pmu_intel.c.

255 {
256  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
257  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
258  struct perf_event *event;
259 
260  /*
261  * The perf_event_attr is constructed in the minimum efficient way:
262  * - set 'pinned = true' to make it task pinned so that if another
263  * cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
264  * - set '.exclude_host = true' to record guest branches behavior;
265  *
266  * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicates host perf
267  * schedule the event without a real HW counter but a fake one;
268  * check is_guest_lbr_event() and __intel_get_event_constraints();
269  *
270  * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
271  * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
272  * PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack
273  * event, which helps KVM to save/restore guest LBR records
274  * during host context switches and reduces quite a lot overhead,
275  * check branch_user_callstack() and intel_pmu_lbr_sched_task();
276  */
277  struct perf_event_attr attr = {
278  .type = PERF_TYPE_RAW,
279  .size = sizeof(attr),
280  .config = INTEL_FIXED_VLBR_EVENT,
281  .sample_type = PERF_SAMPLE_BRANCH_STACK,
282  .pinned = true,
283  .exclude_host = true,
284  .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
285  PERF_SAMPLE_BRANCH_USER,
286  };
287 
288  if (unlikely(lbr_desc->event)) {
289  __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
290  return 0;
291  }
292 
293  event = perf_event_create_kernel_counter(&attr, -1,
294  current, NULL, NULL);
295  if (IS_ERR(event)) {
296  pr_debug_ratelimited("%s: failed %ld\n",
297  __func__, PTR_ERR(event));
298  return PTR_ERR(event);
299  }
300  lbr_desc->event = event;
301  pmu->event_count++;
302  __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
303  return 0;
304 }

◆ intel_pmu_cross_mapped_check()

void intel_pmu_cross_mapped_check ( struct kvm_pmu *  pmu)

Definition at line 746 of file pmu_intel.c.

747 {
748  struct kvm_pmc *pmc = NULL;
749  int bit, hw_idx;
750 
751  for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
752  X86_PMC_IDX_MAX) {
753  pmc = intel_pmc_idx_to_pmc(pmu, bit);
754 
755  if (!pmc || !pmc_speculative_in_use(pmc) ||
756  !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
757  continue;
758 
759  /*
760  * A negative index indicates the event isn't mapped to a
761  * physical counter in the host, e.g. due to contention.
762  */
763  hw_idx = pmc->perf_event->hw.idx;
764  if (hw_idx != pmc->idx && hw_idx > -1)
765  pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
766  }
767 }

◆ intel_pmu_lbr_is_enabled()

static bool intel_pmu_lbr_is_enabled ( struct kvm_vcpu *  vcpu)
inline, static

Definition at line 672 of file vmx.h.

673 {
674  return !!vcpu_to_lbr_records(vcpu)->nr;
675 }

◆ is_unrestricted_guest()

static bool is_unrestricted_guest ( struct kvm_vcpu *  vcpu)
inline, static

Definition at line 727 of file vmx.h.

728 {
729  return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
730  (secondary_exec_controls_get(to_vmx(vcpu)) &
731  SECONDARY_EXEC_UNRESTRICTED_GUEST));
732 }

◆ loaded_vmcs_clear()

void loaded_vmcs_clear ( struct loaded_vmcs *  loaded_vmcs)

Definition at line 814 of file vmx.c.

815 {
816  int cpu = loaded_vmcs->cpu;
817 
818  if (cpu != -1)
819  smp_call_function_single(cpu,
820  __loaded_vmcs_clear, loaded_vmcs, 1);
821 }

◆ pt_update_intercept_for_msr()

void pt_update_intercept_for_msr ( struct kvm_vcpu *  vcpu)

Definition at line 4098 of file vmx.c.

4099 {
4100  struct vcpu_vmx *vmx = to_vmx(vcpu);
4101  bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4102  u32 i;
4103 
4104  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4105  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4106  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4107  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4108  for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4109  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4110  vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4111  }
4112 }

◆ set_cr4_guest_host_mask()

void set_cr4_guest_host_mask ( struct vcpu_vmx *  vmx)

Definition at line 4363 of file vmx.c.

4364 {
4365  struct kvm_vcpu *vcpu = &vmx->vcpu;
4366 
4367  vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4368  ~vcpu->arch.cr4_guest_rsvd_bits;
4369  if (!enable_ept) {
4370  vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4371  vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4372  }
4373  if (is_guest_mode(&vmx->vcpu))
4374  vcpu->arch.cr4_guest_owned_bits &=
4375  ~get_vmcs12(vcpu)->cr4_guest_host_mask;
4376  vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4377 }

◆ to_kvm_vmx()

static __always_inline struct kvm_vmx* to_kvm_vmx ( struct kvm *  kvm)
static

Definition at line 652 of file vmx.h.

653 {
654  return container_of(kvm, struct kvm_vmx, kvm);
655 }

◆ to_vmx()

static __always_inline struct vcpu_vmx* to_vmx ( struct kvm_vcpu *  vcpu)
static

Definition at line 657 of file vmx.h.

658 {
659  return container_of(vcpu, struct vcpu_vmx, vcpu);
660 }
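
to_vmx() (and to_kvm_vmx() above) relies on struct kvm_vcpu being embedded as the member named vcpu inside struct vcpu_vmx, so container_of() reduces to subtracting the member offset; an equivalent open-coded sketch:

struct vcpu_vmx *vmx = (struct vcpu_vmx *)((char *)vcpu - offsetof(struct vcpu_vmx, vcpu));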

◆ vcpu_to_lbr_desc()

static struct lbr_desc* vcpu_to_lbr_desc ( struct kvm_vcpu *  vcpu)
inline, static

Definition at line 662 of file vmx.h.

663 {
664  return &to_vmx(vcpu)->lbr_desc;
665 }

◆ vcpu_to_lbr_records()

static struct x86_pmu_lbr* vcpu_to_lbr_records ( struct kvm_vcpu *  vcpu)
inline, static

Definition at line 667 of file vmx.h.

668 {
669  return &vcpu_to_lbr_desc(vcpu)->records;
670 }

◆ vmx_can_use_ipiv()

static bool vmx_can_use_ipiv ( struct kvm_vcpu *  vcpu)
inline, static

Definition at line 747 of file vmx.h.

748 {
749  return lapic_in_kernel(vcpu) && enable_ipiv;
750 }

◆ vmx_disable_intercept_for_msr()

void vmx_disable_intercept_for_msr ( struct kvm_vcpu *  vcpu,
u32  msr,
int  type 
)

Definition at line 3962 of file vmx.c.

3963 {
3964  struct vcpu_vmx *vmx = to_vmx(vcpu);
3965  unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3966 
3967  if (!cpu_has_vmx_msr_bitmap())
3968  return;
3969 
3971 
3972  /*
3973  * Mark the desired intercept state in shadow bitmap, this is needed
3974  * for resync when the MSR filters change.
3975  */
3976  if (is_valid_passthrough_msr(msr)) {
3977  int idx = possible_passthrough_msr_slot(msr);
3978 
3979  if (idx != -ENOENT) {
3980  if (type & MSR_TYPE_R)
3981  clear_bit(idx, vmx->shadow_msr_intercept.read);
3982  if (type & MSR_TYPE_W)
3983  clear_bit(idx, vmx->shadow_msr_intercept.write);
3984  }
3985  }
3986 
3987  if ((type & MSR_TYPE_R) &&
3988  !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
3989  vmx_set_msr_bitmap_read(msr_bitmap, msr);
3990  type &= ~MSR_TYPE_R;
3991  }
3992 
3993  if ((type & MSR_TYPE_W) &&
3994  !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
3995  vmx_set_msr_bitmap_write(msr_bitmap, msr);
3996  type &= ~MSR_TYPE_W;
3997  }
3998 
3999  if (type & MSR_TYPE_R)
4000  vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4001 
4002  if (type & MSR_TYPE_W)
4003  vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4004 }

◆ vmx_emulation_required()

bool vmx_emulation_required ( struct kvm_vcpu *  vcpu)

Definition at line 1504 of file vmx.c.

1505 {
1506  return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1507 }

◆ vmx_enable_intercept_for_msr()

void vmx_enable_intercept_for_msr ( struct kvm_vcpu *  vcpu,
u32  msr,
int  type 
)

Definition at line 4006 of file vmx.c.

4007 {
4008  struct vcpu_vmx *vmx = to_vmx(vcpu);
4009  unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4010 
4011  if (!cpu_has_vmx_msr_bitmap())
4012  return;
4013 
4014  vmx_msr_bitmap_l01_changed(vmx);
4015 
4016  /*
4017  * Mark the desired intercept state in shadow bitmap, this is needed
4018  * for resync when the MSR filter changes.
4019  */
4020  if (is_valid_passthrough_msr(msr)) {
4021  int idx = possible_passthrough_msr_slot(msr);
4022 
4023  if (idx != -ENOENT) {
4024  if (type & MSR_TYPE_R)
4025  set_bit(idx, vmx->shadow_msr_intercept.read);
4026  if (type & MSR_TYPE_W)
4027  set_bit(idx, vmx->shadow_msr_intercept.write);
4028  }
4029  }
4030 
4031  if (type & MSR_TYPE_R)
4032  vmx_set_msr_bitmap_read(msr_bitmap, msr);
4033 
4034  if (type & MSR_TYPE_W)
4035  vmx_set_msr_bitmap_write(msr_bitmap, msr);
4036 }

◆ vmx_ept_load_pdptrs()

void vmx_ept_load_pdptrs ( struct kvm_vcpu *  vcpu)

Definition at line 3231 of file vmx.c.

3232 {
3233  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3234 
3235  if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3236  return;
3237 
3238  if (is_pae_paging(vcpu)) {
3239  vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3240  vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3241  vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3242  vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3243  }
3244 }

◆ vmx_find_loadstore_msr_slot()

int vmx_find_loadstore_msr_slot ( struct vmx_msrs *  m,
u32  msr 
)

Definition at line 969 of file vmx.c.

970 {
971  unsigned int i;
972 
973  for (i = 0; i < m->nr; ++i) {
974  if (m->val[i].index == msr)
975  return i;
976  }
977  return -ENOENT;
978 }

◆ vmx_find_uret_msr()

struct vmx_uret_msr* vmx_find_uret_msr ( struct vcpu_vmx *  vmx,
u32  msr 
)

Definition at line 713 of file vmx.c.

714 {
715  int i;
716 
717  i = kvm_find_user_return_msr(msr);
718  if (i >= 0)
719  return &vmx->guest_uret_msrs[i];
720  return NULL;
721 }

◆ vmx_get_cpl()

int vmx_get_cpl ( struct kvm_vcpu *  vcpu)

Definition at line 3543 of file vmx.c.

3544 {
3545  struct vcpu_vmx *vmx = to_vmx(vcpu);
3546 
3547  if (unlikely(vmx->rmode.vm86_active))
3548  return 0;
3549  else {
3550  int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3551  return VMX_AR_DPL(ar);
3552  }
3553 }

◆ vmx_get_exit_qual()

static __always_inline unsigned long vmx_get_exit_qual ( struct kvm_vcpu *  vcpu)
static

Definition at line 681 of file vmx.h.

682 {
683  struct vcpu_vmx *vmx = to_vmx(vcpu);
684 
685  if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
686  vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
687 
688  return vmx->exit_qualification;
689 }

◆ vmx_get_instr_info_reg2()

static int vmx_get_instr_info_reg2 ( u32  vmx_instr_info)
inline, static

Definition at line 742 of file vmx.h.

743 {
744  return (vmx_instr_info >> 28) & 0xf;
745 }

◆ vmx_get_interrupt_shadow()

u32 vmx_get_interrupt_shadow ( struct kvm_vcpu *  vcpu)

Definition at line 1561 of file vmx.c.

1562 {
1563  u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1564  int ret = 0;
1565 
1566  if (interruptibility & GUEST_INTR_STATE_STI)
1567  ret |= KVM_X86_SHADOW_INT_STI;
1568  if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1569  ret |= KVM_X86_SHADOW_INT_MOV_SS;
1570 
1571  return ret;
1572 }

◆ vmx_get_intr_info()

static __always_inline u32 vmx_get_intr_info ( struct kvm_vcpu *  vcpu)
static

Definition at line 691 of file vmx.h.

692 {
693  struct vcpu_vmx *vmx = to_vmx(vcpu);
694 
695  if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
696  vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
697 
698  return vmx->exit_intr_info;
699 }

◆ vmx_get_l2_tsc_multiplier()

u64 vmx_get_l2_tsc_multiplier ( struct kvm_vcpu *  vcpu)

Definition at line 1907 of file vmx.c.

1908 {
1909  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1910 
1911  if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
1912  nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
1913  return vmcs12->tsc_multiplier;
1914 
1915  return kvm_caps.default_tsc_scaling_ratio;
1916 }

◆ vmx_get_l2_tsc_offset()

u64 vmx_get_l2_tsc_offset ( struct kvm_vcpu *  vcpu)

Definition at line 1897 of file vmx.c.

1898 {
1899  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1900 
1901  if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
1902  return vmcs12->tsc_offset;
1903 
1904  return 0;
1905 }

◆ vmx_get_nmi_mask()

bool vmx_get_nmi_mask ( struct kvm_vcpu *  vcpu)

Definition at line 4991 of file vmx.c.

4992 {
4993  struct vcpu_vmx *vmx = to_vmx(vcpu);
4994  bool masked;
4995 
4996  if (!enable_vnmi)
4997  return vmx->loaded_vmcs->soft_vnmi_blocked;
4998  if (vmx->loaded_vmcs->nmi_known_unmasked)
4999  return false;
5000  masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5001  vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5002  return masked;
5003 }

◆ vmx_get_rflags()

unsigned long vmx_get_rflags ( struct kvm_vcpu *  vcpu)

Definition at line 1509 of file vmx.c.

1510 {
1511  struct vcpu_vmx *vmx = to_vmx(vcpu);
1512  unsigned long rflags, save_rflags;
1513 
1514  if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1515  kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1516  rflags = vmcs_readl(GUEST_RFLAGS);
1517  if (vmx->rmode.vm86_active) {
1518  rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1519  save_rflags = vmx->rmode.save_rflags;
1520  rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1521  }
1522  vmx->rflags = rflags;
1523  }
1524  return vmx->rflags;
1525 }

◆ vmx_get_rvi()

static u8 vmx_get_rvi ( void  )
inline, static

Definition at line 466 of file vmx.h.

467 {
468  return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
469 }

◆ vmx_get_segment()

void vmx_get_segment ( struct kvm_vcpu *  vcpu,
struct kvm_segment *  var,
int  seg 
)

Definition at line 3496 of file vmx.c.

3497 {
3498  struct vcpu_vmx *vmx = to_vmx(vcpu);
3499  u32 ar;
3500 
3501  if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3502  *var = vmx->rmode.segs[seg];
3503  if (seg == VCPU_SREG_TR
3504  || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3505  return;
3506  var->base = vmx_read_guest_seg_base(vmx, seg);
3507  var->selector = vmx_read_guest_seg_selector(vmx, seg);
3508  return;
3509  }
3510  var->base = vmx_read_guest_seg_base(vmx, seg);
3511  var->limit = vmx_read_guest_seg_limit(vmx, seg);
3512  var->selector = vmx_read_guest_seg_selector(vmx, seg);
3513  ar = vmx_read_guest_seg_ar(vmx, seg);
3514  var->unusable = (ar >> 16) & 1;
3515  var->type = ar & 15;
3516  var->s = (ar >> 4) & 1;
3517  var->dpl = (ar >> 5) & 3;
3518  /*
3519  * Some userspaces do not preserve unusable property. Since usable
3520  * segment has to be present according to VMX spec we can use present
3521  * property to amend userspace bug by making unusable segment always
3522  * nonpresent. vmx_segment_access_rights() already marks nonpresent
3523  * segment as unusable.
3524  */
3525  var->present = !var->unusable;
3526  var->avl = (ar >> 12) & 1;
3527  var->l = (ar >> 13) & 1;
3528  var->db = (ar >> 14) & 1;
3529  var->g = (ar >> 15) & 1;
3530 }

◆ vmx_get_untagged_addr()

gva_t vmx_get_untagged_addr ( struct kvm_vcpu *  vcpu,
gva_t  gva,
unsigned int  flags 
)

Definition at line 8250 of file vmx.c.

8251 {
8252  int lam_bit;
8253  unsigned long cr3_bits;
8254 
8255  if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
8256  return gva;
8257 
8258  if (!is_64_bit_mode(vcpu))
8259  return gva;
8260 
8261  /*
8262  * Bit 63 determines if the address should be treated as user address
8263  * or a supervisor address.
8264  */
8265  if (!(gva & BIT_ULL(63))) {
8266  cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
8267  if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
8268  return gva;
8269 
8270  /* LAM_U48 is ignored if LAM_U57 is set. */
8271  lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
8272  } else {
8273  if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8274  return gva;
8275 
8276  lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
8277  }
8278 
8279  /*
8280  * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
8281  * Bit 63 is retained from the raw virtual address so that untagging
8282  * doesn't change a user access to a supervisor access, and vice versa.
8283  */
8284  return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
8285 }
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
#define X86EMUL_F_INVLPG
Definition: kvm_emulate.h:95
#define X86EMUL_F_IMPLICIT
Definition: kvm_emulate.h:94
#define X86EMUL_F_FETCH
Definition: kvm_emulate.h:93
static unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
Definition: mmu.h:149
static bool is_64_bit_mode(struct kvm_vcpu *vcpu)
Definition: x86.h:152
Here is the caller graph for this function:
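
A minimal userspace sketch of the untagging arithmetic above; sign_extend64() is re-implemented locally with the kernel helper's semantics, and lam_bit is 56 for LAM_U57/CR4.LA57 and 47 for LAM_U48. The sample address is illustrative only.

#include <stdint.h>
#include <stdio.h>

static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return ((int64_t)(value << shift)) >> shift;
}

static uint64_t untag(uint64_t gva, int lam_bit)
{
	/* sign-extend from lam_bit, but keep the original bit 63 */
	return (sign_extend64(gva, lam_bit) & ~(1ULL << 63)) | (gva & (1ULL << 63));
}

int main(void)
{
	/* user pointer with LAM_U57 metadata stashed in bits 62:57 */
	uint64_t tagged = 0x7e00001234567000ULL;
	printf("%#llx -> %#llx\n",
	       (unsigned long long)tagged,
	       (unsigned long long)untag(tagged, 56));
	return 0;
}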

◆ vmx_guest_inject_ac()

bool vmx_guest_inject_ac ( struct kvm_vcpu *  vcpu)

Definition at line 5174 of file vmx.c.

5175 {
5176  if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5177  return true;
5178 
5179  return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5180  (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5181 }
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit)
int vmx_get_cpl(struct kvm_vcpu *vcpu)
Definition: vmx.c:3543
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
Definition: x86.c:13170
Here is the call graph for this function:
Here is the caller graph for this function:
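
A minimal userspace restatement of the legacy alignment-check condition tested above: when split-lock detection is available, a #AC is forwarded to the guest only if the guest could have generated it architecturally, i.e. CPL 3 with CR0.AM and RFLAGS.AC both set. The bit positions below are the standard architectural ones, reproduced here so the sketch is self-contained.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_CR0_AM     (1UL << 18)
#define EXAMPLE_EFLAGS_AC  (1UL << 18)

static bool legacy_alignment_check_possible(int cpl, uint64_t cr0, uint64_t rflags)
{
	return cpl == 3 && (cr0 & EXAMPLE_CR0_AM) && (rflags & EXAMPLE_EFLAGS_AC);
}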

◆ vmx_guest_state_valid()

static bool vmx_guest_state_valid ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 735 of file vmx.h.

736 {
737  return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
738 }
bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
Definition: vmx.c:3796
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_has_waitpkg()

static bool vmx_has_waitpkg ( struct vcpu_vmx vmx)
inlinestatic

Definition at line 713 of file vmx.h.

714 {
715  return secondary_exec_controls_get(vmx) &
716  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
717 }
Here is the caller graph for this function:

◆ vmx_interrupt_blocked()

bool vmx_interrupt_blocked ( struct kvm_vcpu *  vcpu)

Definition at line 5050 of file vmx.c.

5051 {
5052  if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5053  return false;
5054 
5055  return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5056  (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5057  (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5058 }
static bool nested_exit_on_intr(struct vcpu_svm *svm)
Definition: svm.h:581
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_l1_guest_owned_cr0_bits()

static unsigned long vmx_l1_guest_owned_cr0_bits ( void  )
inlinestatic

Definition at line 634 of file vmx.h.

635 {
636  unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
637 
638  /*
639  * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
640  * in order to construct shadow PTEs with the correct protections.
641  * Note! CR0.WP technically can be passed through to the guest if
642  * paging is disabled, but checking CR0.PG would generate a cyclical
643  * dependency of sorts due to forcing the caller to ensure CR0 holds
644  * the correct value prior to determining which CR0 bits can be owned
645  * by L1. Keep it simple and limit the optimization to EPT.
646  */
647  if (!enable_ept)
648  bits &= ~X86_CR0_WP;
649  return bits;
650 }
bool __read_mostly enable_ept
Definition: vmx.c:91
#define KVM_POSSIBLE_CR0_GUEST_BITS
Definition: kvm_cache_regs.h:7
Here is the caller graph for this function:

◆ vmx_need_pf_intercept()

static bool vmx_need_pf_intercept ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 719 of file vmx.h.

720 {
721  if (!enable_ept)
722  return true;
723 
724  return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
725 }
static int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
Definition: cpuid.h:40
bool __read_mostly allow_smaller_maxphyaddr
Definition: x86.c:232
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_nmi_blocked()

bool vmx_nmi_blocked ( struct kvm_vcpu *  vcpu)

Definition at line 5025 of file vmx.c.

5026 {
5027  if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5028  return false;
5029 
5030  if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5031  return true;
5032 
5033  return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5034  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5035  GUEST_INTR_STATE_NMI));
5036 }
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
Definition: svm.h:586
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_passthrough_lbr_msrs()

void vmx_passthrough_lbr_msrs ( struct kvm_vcpu *  vcpu)

Definition at line 713 of file pmu_intel.c.

714 {
715  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
716  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
717 
718  if (!lbr_desc->event) {
719  vmx_disable_lbr_msrs_passthrough(vcpu);
720  if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
721  goto warn;
722  if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
723  goto warn;
724  return;
725  }
726 
727  if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
728  vmx_disable_lbr_msrs_passthrough(vcpu);
729  __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
730  goto warn;
731  } else
732  vmx_enable_lbr_msrs_passthrough(vcpu);
733 
734  return;
735 
736 warn:
737  pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
738 }
static void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:692
static void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:681
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_prepare_switch_to_guest()

void vmx_prepare_switch_to_guest ( struct kvm_vcpu *  vcpu)

Definition at line 1282 of file vmx.c.

1283 {
1284  struct vcpu_vmx *vmx = to_vmx(vcpu);
1285  struct vmcs_host_state *host_state;
1286 #ifdef CONFIG_X86_64
1287  int cpu = raw_smp_processor_id();
1288 #endif
1289  unsigned long fs_base, gs_base;
1290  u16 fs_sel, gs_sel;
1291  int i;
1292 
1293  vmx->req_immediate_exit = false;
1294 
1295  /*
1296  * Note that guest MSRs to be saved/restored can also be changed
1297  * when guest state is loaded. This happens when guest transitions
1298  * to/from long-mode by setting MSR_EFER.LMA.
1299  */
1300  if (!vmx->guest_uret_msrs_loaded) {
1301  vmx->guest_uret_msrs_loaded = true;
1302  for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1303  if (!vmx->guest_uret_msrs[i].load_into_hardware)
1304  continue;
1305 
1306  kvm_set_user_return_msr(i,
1307  vmx->guest_uret_msrs[i].data,
1308  vmx->guest_uret_msrs[i].mask);
1309  }
1310  }
1311 
1314 
1315  if (vmx->guest_state_loaded)
1316  return;
1317 
1318  host_state = &vmx->loaded_vmcs->host_state;
1319 
1320  /*
1321  * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
1322  * allow segment selectors with cpl > 0 or ti == 1.
1323  */
1324  host_state->ldt_sel = kvm_read_ldt();
1325 
1326 #ifdef CONFIG_X86_64
1327  savesegment(ds, host_state->ds_sel);
1328  savesegment(es, host_state->es_sel);
1329 
1330  gs_base = cpu_kernelmode_gs_base(cpu);
1331  if (likely(is_64bit_mm(current->mm))) {
1332  current_save_fsgs();
1333  fs_sel = current->thread.fsindex;
1334  gs_sel = current->thread.gsindex;
1335  fs_base = current->thread.fsbase;
1336  vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1337  } else {
1338  savesegment(fs, fs_sel);
1339  savesegment(gs, gs_sel);
1340  fs_base = read_msr(MSR_FS_BASE);
1341  vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1342  }
1343 
1344  wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1345 #else
1346  savesegment(fs, fs_sel);
1347  savesegment(gs, gs_sel);
1348  fs_base = segment_base(fs_sel);
1349  gs_base = segment_base(gs_sel);
1350 #endif
1351 
1352  vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1353  vmx->guest_state_loaded = true;
1354 }
bool need_vmcs12_to_shadow_sync
Definition: vmx.h:151
struct nested_vmx nested
Definition: vmx.h:329
bool guest_uret_msrs_loaded
Definition: vmx.h:277
bool req_immediate_exit
Definition: vmx.h:335
bool guest_state_loaded
Definition: vmx.h:263
u16 fs_sel
Definition: vmcs.h:41
unsigned long fs_base
Definition: vmcs.h:38
u16 ldt_sel
Definition: vmcs.h:41
unsigned long gs_base
Definition: vmcs.h:37
u16 gs_sel
Definition: vmcs.h:41
u64 mask
Definition: vmx.h:41
bool load_into_hardware
Definition: vmx.h:39
u64 data
Definition: vmx.h:40
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
Definition: nested.c:2124
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, unsigned long fs_base, unsigned long gs_base)
Definition: vmx.c:1255
u32 __read_mostly kvm_nr_uret_msrs
Definition: x86.c:219
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
Definition: x86.c:442
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_constant_host_state()

void vmx_set_constant_host_state ( struct vcpu_vmx vmx)

Definition at line 4296 of file vmx.c.

4297 {
4298  u32 low32, high32;
4299  unsigned long tmpl;
4300  unsigned long cr0, cr3, cr4;
4301 
4302  cr0 = read_cr0();
4303  WARN_ON(cr0 & X86_CR0_TS);
4304  vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
4305 
4306  /*
4307  * Save the most likely value for this task's CR3 in the VMCS.
4308  * We can't use __get_current_cr3_fast() because we're not atomic.
4309  */
4310  cr3 = __read_cr3();
4311  vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
4312  vmx->loaded_vmcs->host_state.cr3 = cr3;
4313 
4314  /* Save the most likely value for this task's CR4 in the VMCS. */
4315  cr4 = cr4_read_shadow();
4316  vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
4317  vmx->loaded_vmcs->host_state.cr4 = cr4;
4318 
4319  vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
4320 #ifdef CONFIG_X86_64
4321  /*
4322  * Load null selectors, so we can avoid reloading them in
4323  * vmx_prepare_switch_to_host(), in case userspace uses
4324  * the null selectors too (the expected case).
4325  */
4326  vmcs_write16(HOST_DS_SELECTOR, 0);
4327  vmcs_write16(HOST_ES_SELECTOR, 0);
4328 #else
4329  vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4330  vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4331 #endif
4332  vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
4333  vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
4334 
4335  vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */
4336 
4337  vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4338 
4339  rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4340  vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4341 
4342  /*
4343  * SYSENTER is used for 32-bit system calls on either 32-bit or
4344  * 64-bit kernels. It is always zero If neither is allowed, otherwise
4345  * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4346  * have already done so!).
4347  */
4348  if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4349  vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4350 
4351  rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4352  vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
4353 
4354  if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4355  rdmsr(MSR_IA32_CR_PAT, low32, high32);
4356  vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4357  }
4358 
4359  if (cpu_has_load_ia32_efer())
4360  vmcs_write64(HOST_IA32_EFER, host_efer);
4361 }
static bool cpu_has_load_ia32_efer(void)
Definition: capabilities.h:99
unsigned long cr3
Definition: vmcs.h:35
unsigned long cr4
Definition: vmcs.h:36
void vmx_vmexit(void)
static unsigned long host_idt_base
Definition: vmx.c:525
u64 __read_mostly host_efer
Definition: x86.c:229
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_cr0()

void vmx_set_cr0 ( struct kvm_vcpu *  vcpu,
unsigned long  cr0 
)

Definition at line 3275 of file vmx.c.

3276 {
3277  struct vcpu_vmx *vmx = to_vmx(vcpu);
3278  unsigned long hw_cr0, old_cr0_pg;
3279  u32 tmp;
3280 
3281  old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3282 
3283  hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3284  if (enable_unrestricted_guest)
3285  hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3286  else {
3287  hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3288  if (!enable_ept)
3289  hw_cr0 |= X86_CR0_WP;
3290 
3291  if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3292  enter_pmode(vcpu);
3293 
3294  if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3295  enter_rmode(vcpu);
3296  }
3297 
3298  vmcs_writel(CR0_READ_SHADOW, cr0);
3299  vmcs_writel(GUEST_CR0, hw_cr0);
3300  vcpu->arch.cr0 = cr0;
3301  kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3302 
3303 #ifdef CONFIG_X86_64
3304  if (vcpu->arch.efer & EFER_LME) {
3305  if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3306  enter_lmode(vcpu);
3307  else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3308  exit_lmode(vcpu);
3309  }
3310 #endif
3311 
3312  if (enable_ept && !enable_unrestricted_guest) {
3313  /*
3314  * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
3315  * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3316  * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3317  * KVM's CR3 is installed.
3318  */
3319  if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3320  vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3321 
3322  /*
3323  * When running with EPT but not unrestricted guest, KVM must
3324  * intercept CR3 accesses when paging is _disabled_. This is
3325  * necessary because restricted guests can't actually run with
3326  * paging disabled, and so KVM stuffs its own CR3 in order to
3327  * run the guest when identity mapped page tables.
3328  *
3329  * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3330  * update, it may be stale with respect to CR3 interception,
3331  * e.g. after nested VM-Enter.
3332  *
3333  * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3334  * stores to forward them to L1, even if KVM does not need to
3335  * intercept them to preserve its identity mapped page tables.
3336  */
3337  if (!(cr0 & X86_CR0_PG)) {
3338  exec_controls_setbit(vmx, CR3_EXITING_BITS);
3339  } else if (!is_guest_mode(vcpu)) {
3340  exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3341  } else {
3342  tmp = exec_controls_get(vmx);
3343  tmp &= ~CR3_EXITING_BITS;
3344  tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3345  exec_controls_set(vmx, tmp);
3346  }
3347 
3348  /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3349  if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3350  vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3351 
3352  /*
3353  * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3354  * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3355  */
3356  if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3357  kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3358  }
3359 
3360  /* depends on vcpu->arch.cr0 to be set to a new value */
3361  vmx->emulation_required = vmx_emulation_required(vcpu);
3362 }
static void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
static ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
static ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
bool emulation_required
Definition: vmx.h:318
u32 cpu_based_vm_exec_control
Definition: vmcs12.h:122
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
Definition: vmx.c:2467
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
Definition: vmx.c:1504
static void enter_pmode(struct kvm_vcpu *vcpu)
Definition: vmx.c:2997
#define CR3_EXITING_BITS
Definition: vmx.c:3261
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST
Definition: vmx.c:145
#define KVM_VM_CR0_ALWAYS_OFF
Definition: vmx.c:144
static void enter_rmode(struct kvm_vcpu *vcpu)
Definition: vmx.c:3067
bool __read_mostly enable_unrestricted_guest
Definition: vmx.c:94
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Definition: vmx.c:3432
#define KVM_VM_CR0_ALWAYS_ON
Definition: vmx.c:146
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_cr4()

void vmx_set_cr4 ( struct kvm_vcpu *  vcpu,
unsigned long  cr4 
)

Definition at line 3432 of file vmx.c.

3433 {
3434  unsigned long old_cr4 = kvm_read_cr4(vcpu);
3435  struct vcpu_vmx *vmx = to_vmx(vcpu);
3436  unsigned long hw_cr4;
3437 
3438  /*
3439  * Pass through host's Machine Check Enable value to hw_cr4, which
3440  * is in force while we are in guest mode. Do not let guests control
3441  * this bit, even if host CR4.MCE == 0.
3442  */
3443  hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3444  if (enable_unrestricted_guest)
3445  hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3446  else if (vmx->rmode.vm86_active)
3447  hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3448  else
3449  hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3450 
3451  if (vmx_umip_emulated()) {
3452  if (cr4 & X86_CR4_UMIP) {
3453  secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3454  hw_cr4 &= ~X86_CR4_UMIP;
3455  } else if (!is_guest_mode(vcpu) ||
3456  !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3457  secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3458  }
3459  }
3460 
3461  vcpu->arch.cr4 = cr4;
3462  kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3463 
3464  if (!enable_unrestricted_guest) {
3465  if (enable_ept) {
3466  if (!is_paging(vcpu)) {
3467  hw_cr4 &= ~X86_CR4_PAE;
3468  hw_cr4 |= X86_CR4_PSE;
3469  } else if (!(cr4 & X86_CR4_PAE)) {
3470  hw_cr4 &= ~X86_CR4_PAE;
3471  }
3472  }
3473 
3474  /*
3475  * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3476  * hardware. To emulate this behavior, SMEP/SMAP/PKU needs
3477  * to be manually disabled when guest switches to non-paging
3478  * mode.
3479  *
3480  * If !enable_unrestricted_guest, the CPU is always running
3481  * with CR0.PG=1 and CR4 needs to be modified.
3482  * If enable_unrestricted_guest, the CPU automatically
3483  * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3484  */
3485  if (!is_paging(vcpu))
3486  hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3487  }
3488 
3489  vmcs_writel(CR4_READ_SHADOW, cr4);
3490  vmcs_writel(GUEST_CR4, hw_cr4);
3491 
3492  if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3494 }
static bool vmx_umip_emulated(void)
Definition: capabilities.h:153
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
Definition: cpuid.c:309
#define KVM_RMODE_VM_CR4_ALWAYS_ON
Definition: vmx.c:151
#define KVM_PMODE_VM_CR4_ALWAYS_ON
Definition: vmx.c:150
#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST
Definition: vmx.c:149
static bool is_paging(struct kvm_vcpu *vcpu)
Definition: x86.h:198
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_efer()

int vmx_set_efer ( struct kvm_vcpu *  vcpu,
u64  efer 
)

Definition at line 3115 of file vmx.c.

3116 {
3117  struct vcpu_vmx *vmx = to_vmx(vcpu);
3118 
3119  /* Nothing to do if hardware doesn't support EFER. */
3120  if (!vmx_find_uret_msr(vmx, MSR_EFER))
3121  return 0;
3122 
3123  vcpu->arch.efer = efer;
3124 #ifdef CONFIG_X86_64
3125  if (efer & EFER_LMA)
3126  vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3127  else
3128  vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3129 #else
3130  if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3131  return 1;
3132 #endif
3133 
3134  vmx_setup_uret_msrs(vmx);
3135  return 0;
3136 }
static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
Definition: vmx.c:1860
struct vmx_uret_msr * vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
Definition: vmx.c:713
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_host_fs_gs()

void vmx_set_host_fs_gs ( struct vmcs_host_state host,
u16  fs_sel,
u16  gs_sel,
unsigned long  fs_base,
unsigned long  gs_base 
)

Definition at line 1255 of file vmx.c.

1257 {
1258  if (unlikely(fs_sel != host->fs_sel)) {
1259  if (!(fs_sel & 7))
1260  vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1261  else
1262  vmcs_write16(HOST_FS_SELECTOR, 0);
1263  host->fs_sel = fs_sel;
1264  }
1265  if (unlikely(gs_sel != host->gs_sel)) {
1266  if (!(gs_sel & 7))
1267  vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1268  else
1269  vmcs_write16(HOST_GS_SELECTOR, 0);
1270  host->gs_sel = gs_sel;
1271  }
1272  if (unlikely(fs_base != host->fs_base)) {
1273  vmcs_writel(HOST_FS_BASE, fs_base);
1274  host->fs_base = fs_base;
1275  }
1276  if (unlikely(gs_base != host->gs_base)) {
1277  vmcs_writel(HOST_GS_BASE, gs_base);
1278  host->gs_base = gs_base;
1279  }
1280 }
Here is the call graph for this function:
Here is the caller graph for this function:
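
The (sel & 7) checks above rely on the segment-selector layout: bits 1:0 are the RPL and bit 2 is the TI (LDT) flag, and the VMX host-state checks require host FS/GS selectors with RPL == 0 and TI == 0, so any other selector is written as the NULL selector instead. A trivial restatement of that predicate (illustrative only, not a KVM helper):

#include <stdbool.h>
#include <stdint.h>

/* A selector may be written to the VMCS host-state area only if RPL == 0 and TI == 0. */
static bool host_selector_usable(uint16_t sel)
{
	return (sel & 7) == 0;
}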

◆ vmx_set_intercept_for_msr()

static void vmx_set_intercept_for_msr ( struct kvm_vcpu *  vcpu,
u32  msr,
int  type,
bool  value 
)
inlinestatic

Definition at line 427 of file vmx.h.

429 {
430  if (value)
431  vmx_enable_intercept_for_msr(vcpu, msr, type);
432  else
433  vmx_disable_intercept_for_msr(vcpu, msr, type);
434 }
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
Definition: vmx.c:4006
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
Definition: vmx.c:3962
Here is the call graph for this function:
Here is the caller graph for this function:
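
A hypothetical usage sketch; guest_can_access_msr is a placeholder predicate, not a real KVM helper. Passing value == true enables interception and value == false passes the MSR through, so callers typically hand in the negation of "may the guest access this MSR directly":

static void example_update_msr_intercept(struct kvm_vcpu *vcpu, u32 msr,
					 bool guest_can_access_msr)
{
	/* true -> intercept reads and writes, false -> pass the MSR through */
	vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_RW, !guest_can_access_msr);
}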

◆ vmx_set_interrupt_shadow()

void vmx_set_interrupt_shadow ( struct kvm_vcpu *  vcpu,
int  mask 
)

Definition at line 1574 of file vmx.c.

1575 {
1576  u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1577  u32 interruptibility = interruptibility_old;
1578 
1579  interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1580 
1581  if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1582  interruptibility |= GUEST_INTR_STATE_MOV_SS;
1583  else if (mask & KVM_X86_SHADOW_INT_STI)
1584  interruptibility |= GUEST_INTR_STATE_STI;
1585 
1586  if ((interruptibility != interruptibility_old))
1587  vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1588 }
Here is the call graph for this function:
Here is the caller graph for this function:
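
Illustrative use only (the real callers reach this through the emulator and kvm_x86_ops): after emulating an STI or MOV SS, the corresponding shadow bit is set so the following instruction completes before interrupts are delivered, and passing a zero mask clears both shadows.

static void example_set_sti_shadow(struct kvm_vcpu *vcpu)
{
	/* hypothetical helper: suppress interrupts for one instruction after an emulated STI */
	vmx_set_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_STI);
}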

◆ vmx_set_nmi_mask()

void vmx_set_nmi_mask ( struct kvm_vcpu *  vcpu,
bool  masked 
)

Definition at line 5005 of file vmx.c.

5006 {
5007  struct vcpu_vmx *vmx = to_vmx(vcpu);
5008 
5009  if (!enable_vnmi) {
5010  if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5011  vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5012  vmx->loaded_vmcs->vnmi_blocked_time = 0;
5013  }
5014  } else {
5015  vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5016  if (masked)
5017  vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5018  GUEST_INTR_STATE_NMI);
5019  else
5020  vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5021  GUEST_INTR_STATE_NMI);
5022  }
5023 }
s64 vnmi_blocked_time
Definition: vmcs.h:71
static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
Definition: vmx_ops.h:277
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
Definition: vmx_ops.h:267
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_rflags()

void vmx_set_rflags ( struct kvm_vcpu *  vcpu,
unsigned long  rflags 
)

Definition at line 1527 of file vmx.c.

1528 {
1529  struct vcpu_vmx *vmx = to_vmx(vcpu);
1530  unsigned long old_rflags;
1531 
1532  /*
1533  * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1534  * is an unrestricted guest in order to mark L2 as needing emulation
1535  * if L1 runs L2 as a restricted guest.
1536  */
1537  if (is_unrestricted_guest(vcpu)) {
1538  kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1539  vmx->rflags = rflags;
1540  vmcs_writel(GUEST_RFLAGS, rflags);
1541  return;
1542  }
1543 
1544  old_rflags = vmx_get_rflags(vcpu);
1545  vmx->rflags = rflags;
1546  if (vmx->rmode.vm86_active) {
1547  vmx->rmode.save_rflags = rflags;
1548  rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1549  }
1550  vmcs_writel(GUEST_RFLAGS, rflags);
1551 
1552  if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1553  vmx->emulation_required = vmx_emulation_required(vcpu);
1554 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_virtual_apic_mode()

void vmx_set_virtual_apic_mode ( struct kvm_vcpu *  vcpu)

Definition at line 6694 of file vmx.c.

6695 {
6696  struct vcpu_vmx *vmx = to_vmx(vcpu);
6697  u32 sec_exec_control;
6698 
6699  if (!lapic_in_kernel(vcpu))
6700  return;
6701 
6702  if (!flexpriority_enabled &&
6703  !cpu_has_vmx_virtualize_x2apic_mode())
6704  return;
6705 
6706  /* Postpone execution until vmcs01 is the current VMCS. */
6707  if (is_guest_mode(vcpu)) {
6708  vmx->nested.change_vmcs01_virtual_apic_mode = true;
6709  return;
6710  }
6711 
6712  sec_exec_control = secondary_exec_controls_get(vmx);
6713  sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6714  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6715 
6716  switch (kvm_get_apic_mode(vcpu)) {
6717  case LAPIC_MODE_INVALID:
6718  WARN_ONCE(true, "Invalid local APIC state");
6719  break;
6720  case LAPIC_MODE_DISABLED:
6721  break;
6722  case LAPIC_MODE_XAPIC:
6723  if (flexpriority_enabled) {
6724  sec_exec_control |=
6725  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6726  kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6727 
6728  /*
6729  * Flush the TLB, reloading the APIC access page will
6730  * only do so if its physical address has changed, but
6731  * the guest may have inserted a non-APIC mapping into
6732  * the TLB while the APIC access page was disabled.
6733  */
6734  kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6735  }
6736  break;
6737  case LAPIC_MODE_X2APIC:
6738  if (cpu_has_vmx_virtualize_x2apic_mode())
6739  sec_exec_control |=
6740  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6741  break;
6742  }
6743  secondary_exec_controls_set(vmx, sec_exec_control);
6744 
6745  vmx_update_msr_bitmap_x2apic(vcpu);
6746 }
static bool cpu_has_vmx_virtualize_x2apic_mode(void)
Definition: capabilities.h:165
@ LAPIC_MODE_X2APIC
Definition: lapic.h:29
@ LAPIC_MODE_DISABLED
Definition: lapic.h:26
@ LAPIC_MODE_XAPIC
Definition: lapic.h:28
@ LAPIC_MODE_INVALID
Definition: lapic.h:27
enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
Definition: x86.c:479
bool change_vmcs01_virtual_apic_mode
Definition: vmx.h:176
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
Definition: vmx.c:4038
bool __read_mostly flexpriority_enabled
Definition: vmx.c:88
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_spec_ctrl_restore_host()

void vmx_spec_ctrl_restore_host ( struct vcpu_vmx vmx,
unsigned int  flags 
)

Definition at line 7192 of file vmx.c.

7194 {
7195  u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7196 
7197  if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7198  return;
7199 
7200  if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7201  vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7202 
7203  /*
7204  * If the guest/host SPEC_CTRL values differ, restore the host value.
7205  *
7206  * For legacy IBRS, the IBRS bit always needs to be written after
7207  * transitioning from a less privileged predictor mode, regardless of
7208  * whether the guest/host values differ.
7209  */
7210  if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7211  vmx->spec_ctrl != hostval)
7212  native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
7213 
7214  barrier_nospec();
7215 }
u64 spec_ctrl
Definition: vmx.h:283

◆ vmx_update_cpu_dirty_logging()

void vmx_update_cpu_dirty_logging ( struct kvm_vcpu *  vcpu)

Definition at line 8110 of file vmx.c.

8111 {
8112  struct vcpu_vmx *vmx = to_vmx(vcpu);
8113 
8114  if (WARN_ON_ONCE(!enable_pml))
8115  return;
8116 
8117  if (is_guest_mode(vcpu)) {
8118  vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8119  return;
8120  }
8121 
8122  /*
8123  * Note, nr_memslots_dirty_logging can be changed concurrent with this
8124  * code, but in that case another update request will be made and so
8125  * the guest will never run with a stale PML value.
8126  */
8127  if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8128  secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8129  else
8130  secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8131 }
bool update_vmcs01_cpu_dirty_logging
Definition: vmx.h:178
bool __read_mostly enable_pml
Definition: vmx.c:120
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_update_exception_bitmap()

void vmx_update_exception_bitmap ( struct kvm_vcpu *  vcpu)

Definition at line 874 of file vmx.c.

875 {
876  u32 eb;
877 
878  eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
879  (1u << DB_VECTOR) | (1u << AC_VECTOR);
880  /*
881  * Guest access to VMware backdoor ports could legitimately
882  * trigger #GP because of TSS I/O permission bitmap.
883  * We intercept those #GP and allow access to them anyway
884  * as VMware does.
885  */
886  if (enable_vmware_backdoor)
887  eb |= (1u << GP_VECTOR);
888  if ((vcpu->guest_debug &
889  (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
890  (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
891  eb |= 1u << BP_VECTOR;
892  if (to_vmx(vcpu)->rmode.vm86_active)
893  eb = ~0;
894  if (!vmx_need_pf_intercept(vcpu))
895  eb &= ~(1u << PF_VECTOR);
896 
897  /* When we are running a nested L2 guest and L1 specified for it a
898  * certain exception bitmap, we must trap the same exceptions and pass
899  * them to L1. When running L2, we will only handle the exceptions
900  * specified above if L1 did not want them.
901  */
902  if (is_guest_mode(vcpu))
903  eb |= get_vmcs12(vcpu)->exception_bitmap;
904  else {
905  int mask = 0, match = 0;
906 
907  if (enable_ept && (eb & (1u << PF_VECTOR))) {
908  /*
909  * If EPT is enabled, #PF is currently only intercepted
910  * if MAXPHYADDR is smaller on the guest than on the
911  * host. In that case we only care about present,
912  * non-reserved faults. For vmcs02, however, PFEC_MASK
913  * and PFEC_MATCH are set in prepare_vmcs02_rare.
914  */
915  mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
916  match = PFERR_PRESENT_MASK;
917  }
918  vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
919  vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
920  }
921 
922  /*
923  * Disabling xfd interception indicates that dynamic xfeatures
924  * might be used in the guest. Always trap #NM in this case
925  * to save guest xfd_err timely.
926  */
927  if (vcpu->arch.xfd_no_write_intercept)
928  eb |= (1u << NM_VECTOR);
929 
930  vmcs_write32(EXCEPTION_BITMAP, eb);
931 }
u32 exception_bitmap
Definition: vmcs12.h:123
static bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
Definition: vmx.h:719
bool __read_mostly enable_vmware_backdoor
Definition: x86.c:176
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_update_host_rsp()

void vmx_update_host_rsp ( struct vcpu_vmx vmx,
unsigned long  host_rsp 
)

Definition at line 7184 of file vmx.c.

7185 {
7186  if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7187  vmx->loaded_vmcs->host_state.rsp = host_rsp;
7188  vmcs_writel(HOST_RSP, host_rsp);
7189  }
7190 }
unsigned long rsp
Definition: vmcs.h:39
Here is the call graph for this function:

◆ vmx_vcpu_load_vmcs()

void vmx_vcpu_load_vmcs ( struct kvm_vcpu *  vcpu,
int  cpu,
struct loaded_vmcs buddy 
)

Definition at line 1415 of file vmx.c.

1417 {
1418  struct vcpu_vmx *vmx = to_vmx(vcpu);
1419  bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1420  struct vmcs *prev;
1421 
1422  if (!already_loaded) {
1423  loaded_vmcs_clear(vmx->loaded_vmcs);
1424  local_irq_disable();
1425 
1426  /*
1427  * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1428  * this cpu's percpu list, otherwise it may not yet be deleted
1429  * from its previous cpu's percpu list. Pairs with the
1430  * smb_wmb() in __loaded_vmcs_clear().
1431  */
1432  smp_rmb();
1433 
1434  list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1435  &per_cpu(loaded_vmcss_on_cpu, cpu));
1436  local_irq_enable();
1437  }
1438 
1439  prev = per_cpu(current_vmcs, cpu);
1440  if (prev != vmx->loaded_vmcs->vmcs) {
1441  per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1442  vmcs_load(vmx->loaded_vmcs->vmcs);
1443 
1444  /*
1445  * No indirect branch prediction barrier needed when switching
1446  * the active VMCS within a vCPU, unless IBRS is advertised to
1447  * the vCPU. To minimize the number of IBPBs executed, KVM
1448  * performs IBPB on nested VM-Exit (a single nested transition
1449  * may switch the active VMCS multiple times).
1450  */
1451  if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1452  indirect_branch_prediction_barrier();
1453  }
1454 
1455  if (!already_loaded) {
1456  void *gdt = get_current_gdt_ro();
1457 
1458  /*
1459  * Flush all EPTP/VPID contexts, the new pCPU may have stale
1460  * TLB entries from its previous association with the vCPU.
1461  */
1462  kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1463 
1464  /*
1465  * Linux uses per-cpu TSS and GDT, so set these when switching
1466  * processors. See 22.2.4.
1467  */
1468  vmcs_writel(HOST_TR_BASE,
1469  (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1470  vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
1471 
1472  if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1473  /* 22.2.3 */
1474  vmcs_writel(HOST_IA32_SYSENTER_ESP,
1475  (unsigned long)(cpu_entry_stack(cpu) + 1));
1476  }
1477 
1478  vmx->loaded_vmcs->cpu = cpu;
1479  }
1480 }
struct list_head loaded_vmcss_on_cpu_link
Definition: vmcs.h:73
static void vmcs_load(struct vmcs *vmcs)
Definition: vmx_ops.h:294
Here is the call graph for this function:
Here is the caller graph for this function: