KVM
nested.c File Reference
#include <linux/objtool.h>
#include <linux/percpu.h>
#include <asm/debugreg.h>
#include <asm/mmu_context.h>
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"
#include "smm.h"
#include "vmcs_shadow_fields.h"

Classes

struct  shadow_vmcs_field
 

Macros

#define pr_fmt(fmt)   KBUILD_MODNAME ": " fmt
 
#define CC   KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 
#define VMX_VPID_EXTENT_SUPPORTED_MASK
 
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE   5
 
#define vmx_vmread_bitmap   (vmx_bitmap[VMX_VMREAD_BITMAP])
 
#define vmx_vmwrite_bitmap   (vmx_bitmap[VMX_VMWRITE_BITMAP])
 
#define SHADOW_FIELD_RO(x, y)   { x, offsetof(struct vmcs12, y) },
 
#define SHADOW_FIELD_RW(x, y)   { x, offsetof(struct vmcs12, y) },
 
#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
 
#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)
 
#define SHADOW_FIELD_RW(x, y)   case x:
 
#define SHADOW_FIELD_RO(x, y)   case x:
 
#define VMCS12_IDX_TO_ENC(idx)   ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
 
#define VMXON_CR0_ALWAYSON   (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
 
#define VMXON_CR4_ALWAYSON   X86_CR4_VMXE
 

Enumerations

enum  { VMX_VMREAD_BITMAP , VMX_VMWRITE_BITMAP , VMX_BITMAP_NR }
 

Functions

 module_param_named (enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO)
 
 module_param (nested_early_check, bool, S_IRUGO)
 
static void init_vmcs_shadow_fields (void)
 
static int nested_vmx_succeed (struct kvm_vcpu *vcpu)
 
static int nested_vmx_failInvalid (struct kvm_vcpu *vcpu)
 
static int nested_vmx_failValid (struct kvm_vcpu *vcpu, u32 vm_instruction_error)
 
static int nested_vmx_fail (struct kvm_vcpu *vcpu, u32 vm_instruction_error)
 
static void nested_vmx_abort (struct kvm_vcpu *vcpu, u32 indicator)
 
static bool vmx_control_verify (u32 control, u32 low, u32 high)
 
static u64 vmx_control_msr (u32 low, u32 high)
 
static void vmx_disable_shadow_vmcs (struct vcpu_vmx *vmx)
 
static void nested_release_evmcs (struct kvm_vcpu *vcpu)
 
static bool nested_evmcs_handle_vmclear (struct kvm_vcpu *vcpu, gpa_t vmptr)
 
static void vmx_sync_vmcs_host_state (struct vcpu_vmx *vmx, struct loaded_vmcs *prev)
 
static void vmx_switch_vmcs (struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 
static void free_nested (struct kvm_vcpu *vcpu)
 
void nested_vmx_free_vcpu (struct kvm_vcpu *vcpu)
 
static bool nested_ept_root_matches (hpa_t root_hpa, u64 root_eptp, u64 eptp)
 
static void nested_ept_invalidate_addr (struct kvm_vcpu *vcpu, gpa_t eptp, gpa_t addr)
 
static void nested_ept_inject_page_fault (struct kvm_vcpu *vcpu, struct x86_exception *fault)
 
static void nested_ept_new_eptp (struct kvm_vcpu *vcpu)
 
static void nested_ept_init_mmu_context (struct kvm_vcpu *vcpu)
 
static void nested_ept_uninit_mmu_context (struct kvm_vcpu *vcpu)
 
static bool nested_vmx_is_page_fault_vmexit (struct vmcs12 *vmcs12, u16 error_code)
 
static bool nested_vmx_is_exception_vmexit (struct kvm_vcpu *vcpu, u8 vector, u32 error_code)
 
static int nested_vmx_check_io_bitmap_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_msr_bitmap_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_tpr_shadow_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void nested_vmx_disable_intercept_for_x2apic_msr (unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int type)
 
static void enable_x2apic_msr_intercepts (unsigned long *msr_bitmap)
 
static void nested_vmx_set_intercept_for_msr (struct vcpu_vmx *vmx, unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int types)
 
static bool nested_vmx_prepare_msr_bitmap (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void nested_cache_shadow_vmcs12 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void nested_flush_cached_shadow_vmcs12 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static bool nested_exit_intr_ack_set (struct kvm_vcpu *vcpu)
 
static int nested_vmx_check_apic_access_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_apicv_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_msr_switch (struct kvm_vcpu *vcpu, u32 count, u64 addr)
 
static int nested_vmx_check_exit_msr_switch_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_entry_msr_switch_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_pml_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_unrestricted_guest_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_mode_based_ept_exec_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_shadow_vmcs_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_msr_check_common (struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
 
static int nested_vmx_load_msr_check (struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
 
static int nested_vmx_store_msr_check (struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
 
static u32 nested_vmx_max_atomic_switch_msrs (struct kvm_vcpu *vcpu)
 
static u32 nested_vmx_load_msr (struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
static bool nested_vmx_get_vmexit_msr_value (struct kvm_vcpu *vcpu, u32 msr_index, u64 *data)
 
static bool read_and_check_msr_entry (struct kvm_vcpu *vcpu, u64 gpa, int i, struct vmx_msr_entry *e)
 
static int nested_vmx_store_msr (struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
static bool nested_msr_store_list_has_msr (struct kvm_vcpu *vcpu, u32 msr_index)
 
static void prepare_vmx_msr_autostore_list (struct kvm_vcpu *vcpu, u32 msr_index)
 
static int nested_vmx_load_cr3 (struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code)
 
static bool nested_has_guest_tlb_tag (struct kvm_vcpu *vcpu)
 
static void nested_vmx_transition_tlb_flush (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool is_vmenter)
 
static bool is_bitwise_subset (u64 superset, u64 subset, u64 mask)
 
static int vmx_restore_vmx_basic (struct vcpu_vmx *vmx, u64 data)
 
static void vmx_get_control_msr (struct nested_vmx_msrs *msrs, u32 msr_index, u32 **low, u32 **high)
 
static int vmx_restore_control_msr (struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 
static int vmx_restore_vmx_misc (struct vcpu_vmx *vmx, u64 data)
 
static int vmx_restore_vmx_ept_vpid_cap (struct vcpu_vmx *vmx, u64 data)
 
static u64 * vmx_get_fixed0_msr (struct nested_vmx_msrs *msrs, u32 msr_index)
 
static int vmx_restore_fixed0_msr (struct vcpu_vmx *vmx, u32 msr_index, u64 data)
 
int vmx_set_vmx_msr (struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
int vmx_get_vmx_msr (struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
 
static void copy_shadow_to_vmcs12 (struct vcpu_vmx *vmx)
 
static void copy_vmcs12_to_shadow (struct vcpu_vmx *vmx)
 
static void copy_enlightened_to_vmcs12 (struct vcpu_vmx *vmx, u32 hv_clean_fields)
 
static void copy_vmcs12_to_enlightened (struct vcpu_vmx *vmx)
 
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld (struct kvm_vcpu *vcpu, bool from_launch)
 
void nested_sync_vmcs12_to_shadow (struct kvm_vcpu *vcpu)
 
static enum hrtimer_restart vmx_preemption_timer_fn (struct hrtimer *timer)
 
static u64 vmx_calc_preemption_timer_value (struct kvm_vcpu *vcpu)
 
static void vmx_start_preemption_timer (struct kvm_vcpu *vcpu, u64 preemption_timeout)
 
static u64 nested_vmx_calc_efer (struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 
static void prepare_vmcs02_constant_state (struct vcpu_vmx *vmx)
 
static void prepare_vmcs02_early_rare (struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 
static void prepare_vmcs02_early (struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, struct vmcs12 *vmcs12)
 
static void prepare_vmcs02_rare (struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 
static int prepare_vmcs02 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, enum vm_entry_failure_code *entry_failure_code)
 
static int nested_vmx_check_nmi_controls (struct vmcs12 *vmcs12)
 
static bool nested_vmx_check_eptp (struct kvm_vcpu *vcpu, u64 new_eptp)
 
static int nested_check_vm_execution_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_check_vm_exit_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_check_vm_entry_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_controls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_address_space_size (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_host_state (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_vmx_check_vmcs_link_ptr (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int nested_check_guest_non_reg_state (struct vmcs12 *vmcs12)
 
static int nested_vmx_check_guest_state (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enum vm_entry_failure_code *entry_failure_code)
 
static int nested_vmx_check_vmentry_hw (struct kvm_vcpu *vcpu)
 
static bool nested_get_vmcs12_pages (struct kvm_vcpu *vcpu)
 
static bool vmx_get_nested_state_pages (struct kvm_vcpu *vcpu)
 
static int nested_vmx_write_pml_buffer (struct kvm_vcpu *vcpu, gpa_t gpa)
 
static int nested_vmx_check_permission (struct kvm_vcpu *vcpu)
 
static u8 vmx_has_apicv_interrupt (struct kvm_vcpu *vcpu)
 
static void load_vmcs12_host_state (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode (struct kvm_vcpu *vcpu, bool from_vmentry)
 
static int nested_vmx_run (struct kvm_vcpu *vcpu, bool launch)
 
static unsigned long vmcs12_guest_cr0 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static unsigned long vmcs12_guest_cr4 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void vmcs12_save_pending_event (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info)
 
void nested_mark_vmcs12_pages_dirty (struct kvm_vcpu *vcpu)
 
static int vmx_complete_nested_posted_interrupt (struct kvm_vcpu *vcpu)
 
static void nested_vmx_inject_exception_vmexit (struct kvm_vcpu *vcpu)
 
static unsigned long vmx_get_pending_dbg_trap (struct kvm_queued_exception *ex)
 
static bool vmx_is_low_priority_db_trap (struct kvm_queued_exception *ex)
 
static void nested_vmx_update_pending_dbg (struct kvm_vcpu *vcpu)
 
static bool nested_vmx_preemption_timer_pending (struct kvm_vcpu *vcpu)
 
static bool vmx_has_nested_events (struct kvm_vcpu *vcpu)
 
static int vmx_check_nested_events (struct kvm_vcpu *vcpu)
 
static u32 vmx_get_preemption_timer_value (struct kvm_vcpu *vcpu)
 
static bool is_vmcs12_ext_field (unsigned long field)
 
static void sync_vmcs02_to_vmcs12_rare (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void copy_vmcs02_to_vmcs12_rare (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void sync_vmcs02_to_vmcs12 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static void prepare_vmcs12 (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
 
static u64 nested_vmx_get_vmcs01_guest_efer (struct vcpu_vmx *vmx)
 
static void nested_vmx_restore_host_state (struct kvm_vcpu *vcpu)
 
void nested_vmx_vmexit (struct kvm_vcpu *vcpu, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
 
static void nested_vmx_triple_fault (struct kvm_vcpu *vcpu)
 
int get_vmx_mem_address (struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
 
static int nested_vmx_get_vmptr (struct kvm_vcpu *vcpu, gpa_t *vmpointer, int *ret)
 
static struct vmcs * alloc_shadow_vmcs (struct kvm_vcpu *vcpu)
 
static int enter_vmx_operation (struct kvm_vcpu *vcpu)
 
static int handle_vmxon (struct kvm_vcpu *vcpu)
 
static void nested_release_vmcs12 (struct kvm_vcpu *vcpu)
 
static int handle_vmxoff (struct kvm_vcpu *vcpu)
 
static int handle_vmclear (struct kvm_vcpu *vcpu)
 
static int handle_vmlaunch (struct kvm_vcpu *vcpu)
 
static int handle_vmresume (struct kvm_vcpu *vcpu)
 
static int handle_vmread (struct kvm_vcpu *vcpu)
 
static bool is_shadow_field_rw (unsigned long field)
 
static bool is_shadow_field_ro (unsigned long field)
 
static int handle_vmwrite (struct kvm_vcpu *vcpu)
 
static void set_current_vmptr (struct vcpu_vmx *vmx, gpa_t vmptr)
 
static int handle_vmptrld (struct kvm_vcpu *vcpu)
 
static int handle_vmptrst (struct kvm_vcpu *vcpu)
 
static int handle_invept (struct kvm_vcpu *vcpu)
 
static int handle_invvpid (struct kvm_vcpu *vcpu)
 
static int nested_vmx_eptp_switching (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static int handle_vmfunc (struct kvm_vcpu *vcpu)
 
bool nested_vmx_check_io_bitmaps (struct kvm_vcpu *vcpu, unsigned int port, int size)
 
static bool nested_vmx_exit_handled_io (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static bool nested_vmx_exit_handled_msr (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, union vmx_exit_reason exit_reason)
 
static bool nested_vmx_exit_handled_cr (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static bool nested_vmx_exit_handled_encls (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
static bool nested_vmx_exit_handled_vmcs_access (struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap)
 
static bool nested_vmx_exit_handled_mtf (struct vmcs12 *vmcs12)
 
static bool nested_vmx_l0_wants_exit (struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
 
static bool nested_vmx_l1_wants_exit (struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
 
bool nested_vmx_reflect_vmexit (struct kvm_vcpu *vcpu)
 
static int vmx_get_nested_state (struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size)
 
void vmx_leave_nested (struct kvm_vcpu *vcpu)
 
static int vmx_set_nested_state (struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, struct kvm_nested_state *kvm_state)
 
void nested_vmx_set_vmcs_shadowing_bitmap (void)
 
static u64 nested_vmx_calc_vmcs_enum_msr (void)
 
static void nested_vmx_setup_pinbased_ctls (struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_exit_ctls (struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_entry_ctls (struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_cpubased_ctls (struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_secondary_ctls (u32 ept_caps, struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_misc_data (struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_basic (struct nested_vmx_msrs *msrs)
 
static void nested_vmx_setup_cr_fixed (struct nested_vmx_msrs *msrs)
 
void nested_vmx_setup_ctls_msrs (struct vmcs_config *vmcs_conf, u32 ept_caps)
 
void nested_vmx_hardware_unsetup (void)
 
__init int nested_vmx_hardware_setup (int(*exit_handlers[])(struct kvm_vcpu *))
 

Variables

static bool __read_mostly enable_shadow_vmcs = 1
 
static bool __read_mostly nested_early_check = 0
 
static unsigned long * vmx_bitmap [VMX_BITMAP_NR]
 
static struct shadow_vmcs_field shadow_read_only_fields []
 
static int max_shadow_read_only_fields
 
static struct shadow_vmcs_field shadow_read_write_fields []
 
static int max_shadow_read_write_fields
 
struct kvm_x86_nested_ops vmx_nested_ops
 

Macro Definition Documentation

◆ BUILD_NVMX_MSR_INTERCEPT_HELPER

#define BUILD_NVMX_MSR_INTERCEPT_HELPER (   rw)
Value:
static inline \
void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
unsigned long *msr_bitmap_l1, \
unsigned long *msr_bitmap_l0, u32 msr) \
{ \
if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
else \
vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
}

Definition at line 565 of file nested.c.
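
As a usage sketch (illustrative only; the exact instantiations and call sites are elsewhere in nested.c), the macro is expanded once per access direction, and the generated helpers are then used when merging L1's MSR bitmap into the bitmap that actually runs L2:

/* Illustrative instantiation and use, not a verbatim copy of nested.c: */
BUILD_NVMX_MSR_INTERCEPT_HELPER(read)   /* defines nested_vmx_set_msr_read_intercept()  */
BUILD_NVMX_MSR_INTERCEPT_HELPER(write)  /* defines nested_vmx_set_msr_write_intercept() */

/*
 * Each generated helper intercepts @msr in the merged bitmap (msr_bitmap_l0)
 * if either vmcs01's bitmap or L1's bitmap intercepts it, and clears the
 * intercept otherwise.
 */
nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1, msr_bitmap_l0, MSR_FS_BASE);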

◆ CC

#define CC   KVM_NESTED_VMENTER_CONSISTENCY_CHECK

Definition at line 27 of file nested.c.

◆ EPTP_PA_MASK

#define EPTP_PA_MASK   GENMASK_ULL(51, 12)

Definition at line 379 of file nested.c.
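
As a usage sketch, masking an EPTP with EPTP_PA_MASK isolates bits 51:12, i.e. the physical address of the EPT root table, so two EPTPs can be compared while ignoring the memory-type and page-walk-length bits (this is roughly what nested_ept_root_matches() does):

/* Illustration only: compare the root addresses of two EPT pointers. */
bool same_root = (eptp_a & EPTP_PA_MASK) == (eptp_b & EPTP_PA_MASK);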

◆ pr_fmt

#define pr_fmt (   fmt)    KBUILD_MODNAME ": " fmt

Definition at line 2 of file nested.c.

◆ SHADOW_FIELD_RO [1/2]

#define SHADOW_FIELD_RO (   x,
  y 
)    { x, offsetof(struct vmcs12, y) },

◆ SHADOW_FIELD_RO [2/2]

#define SHADOW_FIELD_RO (   x,
  y 
)    case x:

◆ SHADOW_FIELD_RW [1/2]

#define SHADOW_FIELD_RW (   x,
  y 
)    { x, offsetof(struct vmcs12, y) },

◆ SHADOW_FIELD_RW [2/2]

#define SHADOW_FIELD_RW (   x,
  y 
)    case x:

◆ VMCS12_IDX_TO_ENC

#define VMCS12_IDX_TO_ENC (   idx)    ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))

Definition at line 6776 of file nested.c.
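
Viewed on 16 bits this is a rotate right by 6 (equivalently a rotate left by 10): bits 5:0 of the packed index land in bits 15:10 of the result and bits 15:6 drop into bits 9:0, recovering a VMCS field encoding from a table index. A standalone sketch of the arithmetic, using an arbitrary example value:

/* Illustration only; 0x1234 is a made-up index. */
uint16_t idx = 0x1234;
uint16_t enc = (uint16_t)(((uint16_t)idx >> 6) | ((uint16_t)idx << 10));  /* == 0xD048 */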

◆ VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE   5

Definition at line 39 of file nested.c.
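
KVM advertises this value in the low bits of the IA32_VMX_MISC MSR it exposes to L1, so one tick of the emulated VMX-preemption timer corresponds to 2^5 = 32 guest TSC cycles. A sketch of the conversion performed when the timer is armed (roughly what vmx_start_preemption_timer() computes, assuming a non-zero virtual TSC frequency):

/* Sketch of the timer-value to nanoseconds conversion. */
u64 tsc_cycles = preemption_timeout << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;	/* x32 */
u64 timeout_ns = tsc_cycles * 1000000ULL / vcpu->arch.virtual_tsc_khz;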

◆ vmx_vmread_bitmap

#define vmx_vmread_bitmap   (vmx_bitmap[VMX_VMREAD_BITMAP])

Definition at line 48 of file nested.c.

◆ vmx_vmwrite_bitmap

#define vmx_vmwrite_bitmap   (vmx_bitmap[VMX_VMWRITE_BITMAP])

Definition at line 49 of file nested.c.

◆ VMX_VPID_EXTENT_SUPPORTED_MASK

#define VMX_VPID_EXTENT_SUPPORTED_MASK
Value:
(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

Definition at line 33 of file nested.c.

◆ VMXON_CR0_ALWAYSON

#define VMXON_CR0_ALWAYSON   (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)

◆ VMXON_CR4_ALWAYSON

#define VMXON_CR4_ALWAYSON   X86_CR4_VMXE
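
These are the CR0/CR4 bits that must be set for VMXON to be legal (protected mode with paging, CR0.NE, and CR4.VMXE). A sketch of the kind of check handle_vmxon() applies to the guest's registers:

/* Sketch only: fail VMXON with #GP(0) if an always-on bit is clear. */
if ((kvm_read_cr0(vcpu) & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON ||
    (kvm_read_cr4(vcpu) & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON) {
	kvm_inject_gp(vcpu, 0);
	return 1;
}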

Enumeration Type Documentation

◆ anonymous enum

anonymous enum
Enumerator
VMX_VMREAD_BITMAP 
VMX_VMWRITE_BITMAP 
VMX_BITMAP_NR 

Definition at line 41 of file nested.c.

 41 enum {
 42  VMX_VMREAD_BITMAP,
 43  VMX_VMWRITE_BITMAP,
 44  VMX_BITMAP_NR
 45 };
@ VMX_VMREAD_BITMAP
Definition: nested.c:42
@ VMX_BITMAP_NR
Definition: nested.c:44
@ VMX_VMWRITE_BITMAP
Definition: nested.c:43

Function Documentation

◆ alloc_shadow_vmcs()

static struct vmcs* alloc_shadow_vmcs ( struct kvm_vcpu *  vcpu)
static

Definition at line 5116 of file nested.c.

5117 {
5118  struct vcpu_vmx *vmx = to_vmx(vcpu);
5119  struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
5120 
5121  /*
5122  * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
5123  * when L1 executes VMXOFF or the vCPU is forced out of nested
5124  * operation. VMXON faults if the CPU is already post-VMXON, so it
5125  * should be impossible to already have an allocated shadow VMCS. KVM
5126  * doesn't support virtualization of VMCS shadowing, so vmcs01 should
5127  * always be the loaded VMCS.
5128  */
5129  if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
5130  return loaded_vmcs->shadow_vmcs;
 5131 
 5132  loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
 5133  if (loaded_vmcs->shadow_vmcs)
 5134  vmcs_clear(loaded_vmcs->shadow_vmcs);
 5135 
5136  return loaded_vmcs->shadow_vmcs;
5137 }
struct vmcs * shadow_vmcs
Definition: vmcs.h:63
struct loaded_vmcs vmcs01
Definition: vmx.h:291
struct loaded_vmcs * loaded_vmcs
Definition: vmx.h:292
struct kvm_vcpu vcpu
Definition: vmx.h:252
static struct vmcs * alloc_vmcs(bool shadow)
Definition: vmx.h:707
static __always_inline struct vcpu_vmx * to_vmx(struct kvm_vcpu *vcpu)
Definition: vmx.h:657
static void vmcs_clear(struct vmcs *vmcs)
Definition: vmx_ops.h:287

◆ copy_enlightened_to_vmcs12()

static void copy_enlightened_to_vmcs12 ( struct vcpu_vmx *  vmx,
u32  hv_clean_fields 
)
static

Definition at line 1604 of file nested.c.

1605 {
1606 #ifdef CONFIG_KVM_HYPERV
1607  struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1608  struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
1609  struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
1610 
1611  /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1612  vmcs12->tpr_threshold = evmcs->tpr_threshold;
1613  vmcs12->guest_rip = evmcs->guest_rip;
1614 
1615  if (unlikely(!(hv_clean_fields &
1616  HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
1617  hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
1618  hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
1619  hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
1620  }
1621 
1622  if (unlikely(!(hv_clean_fields &
1623  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1624  vmcs12->guest_rsp = evmcs->guest_rsp;
1625  vmcs12->guest_rflags = evmcs->guest_rflags;
 1626  vmcs12->guest_interruptibility_info =
 1627  evmcs->guest_interruptibility_info;
1628  /*
1629  * Not present in struct vmcs12:
1630  * vmcs12->guest_ssp = evmcs->guest_ssp;
1631  */
1632  }
1633 
1634  if (unlikely(!(hv_clean_fields &
1635  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
 1636  vmcs12->cpu_based_vm_exec_control =
 1637  evmcs->cpu_based_vm_exec_control;
1638  }
1639 
1640  if (unlikely(!(hv_clean_fields &
1641  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1642  vmcs12->exception_bitmap = evmcs->exception_bitmap;
1643  }
1644 
1645  if (unlikely(!(hv_clean_fields &
1646  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1647  vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1648  }
1649 
1650  if (unlikely(!(hv_clean_fields &
1651  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
 1652  vmcs12->vm_entry_intr_info_field =
 1653  evmcs->vm_entry_intr_info_field;
 1654  vmcs12->vm_entry_exception_error_code =
 1655  evmcs->vm_entry_exception_error_code;
 1656  vmcs12->vm_entry_instruction_len =
 1657  evmcs->vm_entry_instruction_len;
1658  }
1659 
1660  if (unlikely(!(hv_clean_fields &
1661  HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1662  vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1663  vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1664  vmcs12->host_cr0 = evmcs->host_cr0;
1665  vmcs12->host_cr3 = evmcs->host_cr3;
1666  vmcs12->host_cr4 = evmcs->host_cr4;
1667  vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1668  vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1669  vmcs12->host_rip = evmcs->host_rip;
1670  vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1671  vmcs12->host_es_selector = evmcs->host_es_selector;
1672  vmcs12->host_cs_selector = evmcs->host_cs_selector;
1673  vmcs12->host_ss_selector = evmcs->host_ss_selector;
1674  vmcs12->host_ds_selector = evmcs->host_ds_selector;
1675  vmcs12->host_fs_selector = evmcs->host_fs_selector;
1676  vmcs12->host_gs_selector = evmcs->host_gs_selector;
1677  vmcs12->host_tr_selector = evmcs->host_tr_selector;
1678  vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
1679  /*
1680  * Not present in struct vmcs12:
1681  * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
1682  * vmcs12->host_ssp = evmcs->host_ssp;
1683  * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
1684  */
1685  }
1686 
1687  if (unlikely(!(hv_clean_fields &
1688  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
 1689  vmcs12->pin_based_vm_exec_control =
 1690  evmcs->pin_based_vm_exec_control;
 1691  vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
 1692  vmcs12->secondary_vm_exec_control =
 1693  evmcs->secondary_vm_exec_control;
1694  }
1695 
1696  if (unlikely(!(hv_clean_fields &
1697  HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1698  vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1699  vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1700  }
1701 
1702  if (unlikely(!(hv_clean_fields &
1703  HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1704  vmcs12->msr_bitmap = evmcs->msr_bitmap;
1705  }
1706 
1707  if (unlikely(!(hv_clean_fields &
1708  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1709  vmcs12->guest_es_base = evmcs->guest_es_base;
1710  vmcs12->guest_cs_base = evmcs->guest_cs_base;
1711  vmcs12->guest_ss_base = evmcs->guest_ss_base;
1712  vmcs12->guest_ds_base = evmcs->guest_ds_base;
1713  vmcs12->guest_fs_base = evmcs->guest_fs_base;
1714  vmcs12->guest_gs_base = evmcs->guest_gs_base;
1715  vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1716  vmcs12->guest_tr_base = evmcs->guest_tr_base;
1717  vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1718  vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1719  vmcs12->guest_es_limit = evmcs->guest_es_limit;
1720  vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1721  vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1722  vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1723  vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1724  vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1725  vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1726  vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1727  vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1728  vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1729  vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1730  vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1731  vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1732  vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1733  vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1734  vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1735  vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1736  vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1737  vmcs12->guest_es_selector = evmcs->guest_es_selector;
1738  vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1739  vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1740  vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1741  vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1742  vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1743  vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1744  vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1745  }
1746 
1747  if (unlikely(!(hv_clean_fields &
1748  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1749  vmcs12->tsc_offset = evmcs->tsc_offset;
1750  vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1751  vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1752  vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap;
1753  vmcs12->tsc_multiplier = evmcs->tsc_multiplier;
1754  }
1755 
1756  if (unlikely(!(hv_clean_fields &
1757  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1758  vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1759  vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1760  vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1761  vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1762  vmcs12->guest_cr0 = evmcs->guest_cr0;
1763  vmcs12->guest_cr3 = evmcs->guest_cr3;
1764  vmcs12->guest_cr4 = evmcs->guest_cr4;
1765  vmcs12->guest_dr7 = evmcs->guest_dr7;
1766  }
1767 
1768  if (unlikely(!(hv_clean_fields &
1769  HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1770  vmcs12->host_fs_base = evmcs->host_fs_base;
1771  vmcs12->host_gs_base = evmcs->host_gs_base;
1772  vmcs12->host_tr_base = evmcs->host_tr_base;
1773  vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1774  vmcs12->host_idtr_base = evmcs->host_idtr_base;
1775  vmcs12->host_rsp = evmcs->host_rsp;
1776  }
1777 
1778  if (unlikely(!(hv_clean_fields &
1779  HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1780  vmcs12->ept_pointer = evmcs->ept_pointer;
1781  vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1782  }
1783 
1784  if (unlikely(!(hv_clean_fields &
1785  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1786  vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1787  vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1788  vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1789  vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1790  vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1791  vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1792  vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1793  vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
 1794  vmcs12->guest_pending_dbg_exceptions =
 1795  evmcs->guest_pending_dbg_exceptions;
1796  vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1797  vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1798  vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1799  vmcs12->guest_activity_state = evmcs->guest_activity_state;
1800  vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1801  vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl;
1802  /*
1803  * Not present in struct vmcs12:
1804  * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet;
1805  * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl;
1806  * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr;
1807  */
1808  }
1809 
1810  /*
1811  * Not used?
1812  * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1813  * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1814  * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1815  * vmcs12->page_fault_error_code_mask =
1816  * evmcs->page_fault_error_code_mask;
1817  * vmcs12->page_fault_error_code_match =
1818  * evmcs->page_fault_error_code_match;
1819  * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1820  * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1821  * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1822  * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1823  */
1824 
1825  /*
1826  * Read only fields:
1827  * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1828  * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1829  * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1830  * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1831  * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1832  * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1833  * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1834  * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1835  * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1836  * vmcs12->exit_qualification = evmcs->exit_qualification;
1837  * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1838  *
1839  * Not present in struct vmcs12:
1840  * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1841  * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1842  * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1843  * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1844  */
1845 
1846  return;
1847 #else /* CONFIG_KVM_HYPERV */
1848  KVM_BUG_ON(1, vmx->vcpu.kvm);
1849 #endif /* CONFIG_KVM_HYPERV */
1850 }
struct vmcs12 * cached_vmcs12
Definition: vmx.h:129
struct nested_vmx nested
Definition: vmx.h:329
Definition: vmcs12.h:27
natural_width guest_rsp
Definition: vmcs12.h:102
u64 guest_ia32_efer
Definition: vmcs12.h:57
u16 guest_gs_selector
Definition: vmcs12.h:176
u16 guest_ss_selector
Definition: vmcs12.h:173
natural_width guest_sysenter_eip
Definition: vmcs12.h:107
natural_width guest_ldtr_base
Definition: vmcs12.h:97
natural_width guest_es_base
Definition: vmcs12.h:91
natural_width guest_cr4
Definition: vmcs12.h:90
natural_width guest_tr_base
Definition: vmcs12.h:98
natural_width host_cr4
Definition: vmcs12.h:110
u32 vm_entry_controls
Definition: vmcs12.h:130
u64 guest_ia32_pat
Definition: vmcs12.h:56
natural_width guest_gs_base
Definition: vmcs12.h:96
u32 vm_entry_exception_error_code
Definition: vmcs12.h:133
natural_width cr0_read_shadow
Definition: vmcs12.h:83
natural_width guest_rip
Definition: vmcs12.h:103
u16 host_tr_selector
Definition: vmcs12.h:186
u32 guest_es_limit
Definition: vmcs12.h:145
u64 xss_exit_bitmap
Definition: vmcs12.h:52
u16 guest_ldtr_selector
Definition: vmcs12.h:177
natural_width host_ia32_sysenter_eip
Definition: vmcs12.h:117
u32 cpu_based_vm_exec_control
Definition: vmcs12.h:122
natural_width host_fs_base
Definition: vmcs12.h:111
u64 io_bitmap_a
Definition: vmcs12.h:37
u32 exception_bitmap
Definition: vmcs12.h:123
u16 host_ss_selector
Definition: vmcs12.h:182
u32 guest_ldtr_ar_bytes
Definition: vmcs12.h:161
natural_width guest_cs_base
Definition: vmcs12.h:92
u32 tpr_threshold
Definition: vmcs12.h:135
natural_width host_idtr_base
Definition: vmcs12.h:115
u16 host_gs_selector
Definition: vmcs12.h:185
natural_width host_gdtr_base
Definition: vmcs12.h:114
u64 guest_pdptr1
Definition: vmcs12.h:60
u32 vm_exit_controls
Definition: vmcs12.h:127
u32 secondary_vm_exec_control
Definition: vmcs12.h:136
natural_width guest_ss_base
Definition: vmcs12.h:93
u64 guest_pdptr0
Definition: vmcs12.h:59
u32 guest_cs_limit
Definition: vmcs12.h:146
natural_width guest_cr0
Definition: vmcs12.h:88
u64 guest_ia32_debugctl
Definition: vmcs12.h:55
natural_width host_rip
Definition: vmcs12.h:119
natural_width host_ia32_sysenter_esp
Definition: vmcs12.h:116
u32 vm_entry_intr_info_field
Definition: vmcs12.h:132
u64 guest_bndcfgs
Definition: vmcs12.h:63
u16 host_ds_selector
Definition: vmcs12.h:183
u32 guest_ds_limit
Definition: vmcs12.h:148
u32 vm_entry_instruction_len
Definition: vmcs12.h:134
natural_width guest_idtr_base
Definition: vmcs12.h:100
natural_width host_tr_base
Definition: vmcs12.h:113
u64 guest_pdptr3
Definition: vmcs12.h:62
u64 host_ia32_efer
Definition: vmcs12.h:65
u16 guest_es_selector
Definition: vmcs12.h:171
u64 encls_exiting_bitmap
Definition: vmcs12.h:72
natural_width guest_sysenter_esp
Definition: vmcs12.h:106
u64 tsc_multiplier
Definition: vmcs12.h:73
u32 guest_idtr_limit
Definition: vmcs12.h:154
natural_width host_cr3
Definition: vmcs12.h:109
u16 host_fs_selector
Definition: vmcs12.h:184
u32 guest_ss_limit
Definition: vmcs12.h:147
u64 io_bitmap_b
Definition: vmcs12.h:38
natural_width guest_gdtr_base
Definition: vmcs12.h:99
u16 guest_cs_selector
Definition: vmcs12.h:172
u16 host_cs_selector
Definition: vmcs12.h:181
natural_width guest_dr7
Definition: vmcs12.h:101
u64 virtual_apic_page_addr
Definition: vmcs12.h:44
u32 guest_activity_state
Definition: vmcs12.h:164
natural_width host_gs_base
Definition: vmcs12.h:112
u64 guest_pdptr2
Definition: vmcs12.h:61
natural_width guest_ds_base
Definition: vmcs12.h:94
u64 guest_ia32_perf_global_ctrl
Definition: vmcs12.h:58
natural_width guest_cr3
Definition: vmcs12.h:89
natural_width guest_rflags
Definition: vmcs12.h:104
u16 virtual_processor_id
Definition: vmcs12.h:169
u16 host_es_selector
Definition: vmcs12.h:180
natural_width cr0_guest_host_mask
Definition: vmcs12.h:81
natural_width guest_fs_base
Definition: vmcs12.h:95
u32 guest_interruptibility_info
Definition: vmcs12.h:163
u16 guest_ds_selector
Definition: vmcs12.h:174
u64 vmcs_link_pointer
Definition: vmcs12.h:54
u16 guest_tr_selector
Definition: vmcs12.h:178
u32 guest_fs_ar_bytes
Definition: vmcs12.h:159
u32 guest_ss_ar_bytes
Definition: vmcs12.h:157
u32 guest_tr_limit
Definition: vmcs12.h:152
u32 guest_fs_limit
Definition: vmcs12.h:149
u32 guest_sysenter_cs
Definition: vmcs12.h:165
natural_width host_cr0
Definition: vmcs12.h:108
u32 guest_tr_ar_bytes
Definition: vmcs12.h:162
natural_width cr4_guest_host_mask
Definition: vmcs12.h:82
u64 msr_bitmap
Definition: vmcs12.h:39
u32 guest_ds_ar_bytes
Definition: vmcs12.h:158
u32 guest_gs_ar_bytes
Definition: vmcs12.h:160
u32 host_ia32_sysenter_cs
Definition: vmcs12.h:166
natural_width host_rsp
Definition: vmcs12.h:118
u32 guest_gdtr_limit
Definition: vmcs12.h:153
natural_width guest_pending_dbg_exceptions
Definition: vmcs12.h:105
u16 guest_fs_selector
Definition: vmcs12.h:175
u32 guest_es_ar_bytes
Definition: vmcs12.h:155
u32 guest_ldtr_limit
Definition: vmcs12.h:151
u32 pin_based_vm_exec_control
Definition: vmcs12.h:121
u64 tsc_offset
Definition: vmcs12.h:43
u64 host_ia32_perf_global_ctrl
Definition: vmcs12.h:66
natural_width cr4_read_shadow
Definition: vmcs12.h:84
u64 ept_pointer
Definition: vmcs12.h:47
u32 guest_cs_ar_bytes
Definition: vmcs12.h:156
u64 host_ia32_pat
Definition: vmcs12.h:64
u32 guest_gs_limit
Definition: vmcs12.h:150
static struct hv_enlightened_vmcs * nested_vmx_evmcs(struct vcpu_vmx *vmx)
Definition: hyperv.h:84

◆ copy_shadow_to_vmcs12()

static void copy_shadow_to_vmcs12 ( struct vcpu_vmx *  vmx)
static

Definition at line 1543 of file nested.c.

1544 {
1545  struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1546  struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1547  struct shadow_vmcs_field field;
1548  unsigned long val;
1549  int i;
1550 
1551  if (WARN_ON(!shadow_vmcs))
1552  return;
1553 
1554  preempt_disable();
1555 
1556  vmcs_load(shadow_vmcs);
1557 
1558  for (i = 0; i < max_shadow_read_write_fields; i++) {
1559  field = shadow_read_write_fields[i];
1560  val = __vmcs_readl(field.encoding);
1561  vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1562  }
1563 
1564  vmcs_clear(shadow_vmcs);
1565  vmcs_load(vmx->loaded_vmcs->vmcs);
1566 
1567  preempt_enable();
1568 }
static struct vmcs12 * get_vmcs12(struct kvm_vcpu *vcpu)
Definition: nested.h:40
struct vmcs * vmcs
Definition: vmcs.h:62
Definition: vmcs.h:21
static void vmcs12_write_any(struct vmcs12 *vmcs12, unsigned long field, u16 offset, u64 field_value)
Definition: vmcs12.h:405
static int max_shadow_read_write_fields
Definition: nested.c:66
static struct shadow_vmcs_field shadow_read_write_fields[]
Definition: nested.c:62
static void vmcs_load(struct vmcs *vmcs)
Definition: vmx_ops.h:294
static __always_inline unsigned long __vmcs_readl(unsigned long field)
Definition: vmx_ops.h:91

◆ copy_vmcs02_to_vmcs12_rare()

static void copy_vmcs02_to_vmcs12_rare ( struct kvm_vcpu *  vcpu,
struct vmcs12 *  vmcs12 
)
static

Definition at line 4343 of file nested.c.

4345 {
4346  struct vcpu_vmx *vmx = to_vmx(vcpu);
4347  int cpu;
4348 
 4349  if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
 4350  return;
4351 
4352 
4353  WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4354 
4355  cpu = get_cpu();
4356  vmx->loaded_vmcs = &vmx->nested.vmcs02;
4357  vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4358 
 4359  sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 4360 
4361  vmx->loaded_vmcs = &vmx->vmcs01;
4362  vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4363  put_cpu();
4364 }
struct loaded_vmcs vmcs02
Definition: vmx.h:194
bool need_sync_vmcs02_to_vmcs12_rare
Definition: vmx.h:167
static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:4298
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, struct loaded_vmcs *buddy)
Definition: vmx.c:1415

◆ copy_vmcs12_to_enlightened()

static void copy_vmcs12_to_enlightened ( struct vcpu_vmx *  vmx)
static

Definition at line 1852 of file nested.c.

1853 {
1854 #ifdef CONFIG_KVM_HYPERV
1855  struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1856  struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
1857 
1858  /*
1859  * Should not be changed by KVM:
1860  *
1861  * evmcs->host_es_selector = vmcs12->host_es_selector;
1862  * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1863  * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1864  * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1865  * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1866  * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1867  * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1868  * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1869  * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1870  * evmcs->host_cr0 = vmcs12->host_cr0;
1871  * evmcs->host_cr3 = vmcs12->host_cr3;
1872  * evmcs->host_cr4 = vmcs12->host_cr4;
1873  * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1874  * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1875  * evmcs->host_rip = vmcs12->host_rip;
1876  * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1877  * evmcs->host_fs_base = vmcs12->host_fs_base;
1878  * evmcs->host_gs_base = vmcs12->host_gs_base;
1879  * evmcs->host_tr_base = vmcs12->host_tr_base;
1880  * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1881  * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1882  * evmcs->host_rsp = vmcs12->host_rsp;
1883  * sync_vmcs02_to_vmcs12() doesn't read these:
1884  * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1885  * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1886  * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1887  * evmcs->ept_pointer = vmcs12->ept_pointer;
1888  * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1889  * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1890  * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1891  * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1892  * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1893  * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1894  * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1895  * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1896  * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1897  * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1898  * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1899  * evmcs->page_fault_error_code_mask =
1900  * vmcs12->page_fault_error_code_mask;
1901  * evmcs->page_fault_error_code_match =
1902  * vmcs12->page_fault_error_code_match;
1903  * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1904  * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1905  * evmcs->tsc_offset = vmcs12->tsc_offset;
1906  * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1907  * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1908  * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1909  * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1910  * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1911  * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1912  * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1913  * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1914  * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl;
1915  * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl;
1916  * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap;
1917  * evmcs->tsc_multiplier = vmcs12->tsc_multiplier;
1918  *
1919  * Not present in struct vmcs12:
1920  * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1921  * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1922  * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1923  * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1924  * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet;
1925  * evmcs->host_ssp = vmcs12->host_ssp;
1926  * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr;
1927  * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet;
1928  * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl;
1929  * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr;
1930  * evmcs->guest_ssp = vmcs12->guest_ssp;
1931  */
1932 
1933  evmcs->guest_es_selector = vmcs12->guest_es_selector;
1934  evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1935  evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1936  evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1937  evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1938  evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1939  evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1940  evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1941 
1942  evmcs->guest_es_limit = vmcs12->guest_es_limit;
1943  evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1944  evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1945  evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1946  evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1947  evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1948  evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1949  evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1950  evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1951  evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1952 
1953  evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1954  evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1955  evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1956  evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1957  evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1958  evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1959  evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1960  evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1961 
1962  evmcs->guest_es_base = vmcs12->guest_es_base;
1963  evmcs->guest_cs_base = vmcs12->guest_cs_base;
1964  evmcs->guest_ss_base = vmcs12->guest_ss_base;
1965  evmcs->guest_ds_base = vmcs12->guest_ds_base;
1966  evmcs->guest_fs_base = vmcs12->guest_fs_base;
1967  evmcs->guest_gs_base = vmcs12->guest_gs_base;
1968  evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1969  evmcs->guest_tr_base = vmcs12->guest_tr_base;
1970  evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1971  evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1972 
1973  evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1974  evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1975 
1976  evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1977  evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1978  evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1979  evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1980 
 1981  evmcs->guest_pending_dbg_exceptions =
 1982  vmcs12->guest_pending_dbg_exceptions;
1983  evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1984  evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1985 
1986  evmcs->guest_activity_state = vmcs12->guest_activity_state;
1987  evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1988 
1989  evmcs->guest_cr0 = vmcs12->guest_cr0;
1990  evmcs->guest_cr3 = vmcs12->guest_cr3;
1991  evmcs->guest_cr4 = vmcs12->guest_cr4;
1992  evmcs->guest_dr7 = vmcs12->guest_dr7;
1993 
1994  evmcs->guest_physical_address = vmcs12->guest_physical_address;
1995 
1996  evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1997  evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1998  evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1999  evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
2000  evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
2001  evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
2002  evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
2003  evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
2004 
2005  evmcs->exit_qualification = vmcs12->exit_qualification;
2006 
2007  evmcs->guest_linear_address = vmcs12->guest_linear_address;
2008  evmcs->guest_rsp = vmcs12->guest_rsp;
2009  evmcs->guest_rflags = vmcs12->guest_rflags;
2010 
 2011  evmcs->guest_interruptibility_info =
 2012  vmcs12->guest_interruptibility_info;
2013  evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
2014  evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
2015  evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
 2016  evmcs->vm_entry_exception_error_code =
 2017  vmcs12->vm_entry_exception_error_code;
2018  evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
2019 
2020  evmcs->guest_rip = vmcs12->guest_rip;
2021 
2022  evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
2023 
2024  return;
2025 #else /* CONFIG_KVM_HYPERV */
2026  KVM_BUG_ON(1, vmx->vcpu.kvm);
2027 #endif /* CONFIG_KVM_HYPERV */
2028 }
u32 vm_exit_reason
Definition: vmcs12.h:138
u32 vmx_instruction_info
Definition: vmcs12.h:144
u32 vm_exit_intr_info
Definition: vmcs12.h:139
u32 vm_exit_instruction_len
Definition: vmcs12.h:143
u32 vm_instruction_error
Definition: vmcs12.h:137
natural_width guest_linear_address
Definition: vmcs12.h:87
u32 idt_vectoring_info_field
Definition: vmcs12.h:141
u64 guest_physical_address
Definition: vmcs12.h:53
u32 idt_vectoring_error_code
Definition: vmcs12.h:142
natural_width exit_qualification
Definition: vmcs12.h:86
u32 vm_exit_intr_error_code
Definition: vmcs12.h:140

◆ copy_vmcs12_to_shadow()

static void copy_vmcs12_to_shadow ( struct vcpu_vmx *  vmx)
static

Definition at line 1570 of file nested.c.

1571 {
 1572  const struct shadow_vmcs_field *fields[] = {
 1573  shadow_read_write_fields,
 1574  shadow_read_only_fields
 1575  };
 1576  const int max_fields[] = {
 1577  max_shadow_read_write_fields,
 1578  max_shadow_read_only_fields
 1579  };
1580  struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1581  struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1582  struct shadow_vmcs_field field;
1583  unsigned long val;
1584  int i, q;
1585 
1586  if (WARN_ON(!shadow_vmcs))
1587  return;
1588 
1589  vmcs_load(shadow_vmcs);
1590 
1591  for (q = 0; q < ARRAY_SIZE(fields); q++) {
1592  for (i = 0; i < max_fields[q]; i++) {
1593  field = fields[q][i];
1594  val = vmcs12_read_any(vmcs12, field.encoding,
1595  field.offset);
1596  __vmcs_writel(field.encoding, val);
1597  }
1598  }
1599 
1600  vmcs_clear(shadow_vmcs);
1601  vmcs_load(vmx->loaded_vmcs->vmcs);
1602 }
static u64 vmcs12_read_any(struct vmcs12 *vmcs12, unsigned long field, u16 offset)
Definition: vmcs12.h:385
static int max_shadow_read_only_fields
Definition: nested.c:59
static struct shadow_vmcs_field shadow_read_only_fields[]
Definition: nested.c:55
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
Definition: vmx_ops.h:223

◆ enable_x2apic_msr_intercepts()

static void enable_x2apic_msr_intercepts ( unsigned long *  msr_bitmap)
inline static

Definition at line 553 of file nested.c.

554 {
555  int msr;
556 
557  for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
558  unsigned word = msr / BITS_PER_LONG;
559 
560  msr_bitmap[word] = ~0;
561  msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
562  }
563 }
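
For reference, a sketch of the MSR-bitmap layout the indexing above relies on (the architectural VMX MSR bitmap is a 4 KiB page with the read bitmap for low MSRs at the start and the matching write bitmap 0x800 bytes in):

/*
 * Layout assumed by the two stores (per the VMX MSR-bitmap format):
 *   bytes 0x000-0x3ff: read bitmap,  MSRs 0x00000000-0x00001fff
 *   bytes 0x800-0xbff: write bitmap, MSRs 0x00000000-0x00001fff
 * so "word + 0x800 / sizeof(long)" sets the write-intercept bits for the
 * same x2APIC MSR range (0x800-0x8ff) that the first store covers, undoing
 * any pass-through that nested_vmx_disable_intercept_for_x2apic_msr() set up.
 */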

◆ enter_vmx_operation()

static int enter_vmx_operation ( struct kvm_vcpu *  vcpu)
static

Definition at line 5139 of file nested.c.

5140 {
5141  struct vcpu_vmx *vmx = to_vmx(vcpu);
5142  int r;
5143 
5144  r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5145  if (r < 0)
5146  goto out_vmcs02;
5147 
5148  vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5149  if (!vmx->nested.cached_vmcs12)
5150  goto out_cached_vmcs12;
5151 
5152  vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5153  vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5154  if (!vmx->nested.cached_shadow_vmcs12)
5155  goto out_cached_shadow_vmcs12;
5156 
 5157  if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
 5158  goto out_shadow_vmcs;
5159 
5160  hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
5161  HRTIMER_MODE_ABS_PINNED);
 5162  vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 5163 
5164  vmx->nested.vpid02 = allocate_vpid();
5165 
5166  vmx->nested.vmcs02_initialized = false;
5167  vmx->nested.vmxon = true;
5168 
5169  if (vmx_pt_mode_is_host_guest()) {
5170  vmx->pt_desc.guest.ctl = 0;
 5171  pt_update_intercept_for_msr(vcpu);
 5172  }
5173 
5174  return 0;
5175 
5176 out_shadow_vmcs:
5177  kfree(vmx->nested.cached_shadow_vmcs12);
5178 
5179 out_cached_shadow_vmcs12:
5180  kfree(vmx->nested.cached_vmcs12);
5181 
5182 out_cached_vmcs12:
 5183  free_loaded_vmcs(&vmx->nested.vmcs02);
 5184 
5185 out_vmcs02:
5186  return -ENOMEM;
5187 }
static bool vmx_pt_mode_is_host_guest(void)
Definition: capabilities.h:388
struct hrtimer preemption_timer
Definition: vmx.h:210
struct vmcs12 * cached_shadow_vmcs12
Definition: vmx.h:135
bool vmxon
Definition: vmx.h:118
u16 vpid02
Definition: vmx.h:231
bool vmcs02_initialized
Definition: vmx.h:174
struct gfn_to_hva_cache shadow_vmcs12_cache
Definition: vmx.h:140
u64 ctl
Definition: vmx.h:56
struct pt_ctx guest
Definition: vmx.h:70
struct pt_desc pt_desc
Definition: vmx.h:358
#define VMCS12_SIZE
Definition: vmcs12.h:206
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
Definition: nested.c:2136
static bool __read_mostly enable_shadow_vmcs
Definition: nested.c:21
static struct vmcs * alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
Definition: nested.c:5116
int allocate_vpid(void)
Definition: vmx.c:3919
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
Definition: vmx.c:2905
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
Definition: vmx.c:4098
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
Definition: vmx.c:2893

◆ free_nested()

static void free_nested ( struct kvm_vcpu *  vcpu)
static

Definition at line 323 of file nested.c.

324 {
325  struct vcpu_vmx *vmx = to_vmx(vcpu);
326 
327  if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
328  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
329 
330  if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
331  return;
332 
333  kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
334 
335  vmx->nested.vmxon = false;
336  vmx->nested.smm.vmxon = false;
337  vmx->nested.vmxon_ptr = INVALID_GPA;
338  free_vpid(vmx->nested.vpid02);
339  vmx->nested.posted_intr_nv = -1;
340  vmx->nested.current_vmptr = INVALID_GPA;
341  if (enable_shadow_vmcs) {
 342  vmx_disable_shadow_vmcs(vmx);
 343  vmcs_clear(vmx->vmcs01.shadow_vmcs);
 344  free_vmcs(vmx->vmcs01.shadow_vmcs);
 345  vmx->vmcs01.shadow_vmcs = NULL;
346  }
347  kfree(vmx->nested.cached_vmcs12);
348  vmx->nested.cached_vmcs12 = NULL;
349  kfree(vmx->nested.cached_shadow_vmcs12);
350  vmx->nested.cached_shadow_vmcs12 = NULL;
351  /*
352  * Unpin physical memory we referred to in the vmcs02. The APIC access
353  * page's backing page (yeah, confusing) shouldn't actually be accessed,
354  * and if it is written, the contents are irrelevant.
355  */
 356  kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
 357  kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
 358  kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
359  vmx->nested.pi_desc = NULL;
360 
361  kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
362 
 363  nested_release_evmcs(vcpu);
 364 
 365  free_loaded_vmcs(&vmx->nested.vmcs02);
 366 }
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
Definition: kvm_main.c:3186
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free)
Definition: mmu.c:3587
u16 posted_intr_nv
Definition: vmx.h:208
struct kvm_host_map virtual_apic_map
Definition: vmx.h:201
gpa_t current_vmptr
Definition: vmx.h:123
struct kvm_host_map pi_desc_map
Definition: vmx.h:202
struct pi_desc * pi_desc
Definition: vmx.h:206
struct kvm_host_map apic_access_page_map
Definition: vmx.h:200
struct nested_vmx::@39 smm
gpa_t vmxon_ptr
Definition: vmx.h:119
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
Definition: nested.c:220
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
Definition: nested.c:294
static void nested_release_evmcs(struct kvm_vcpu *vcpu)
Definition: nested.c:227
void free_vmcs(struct vmcs *vmcs)
Definition: vmx.c:2885
void free_vpid(int vpid)
Definition: vmx.c:3935

◆ get_vmx_mem_address()

int get_vmx_mem_address ( struct kvm_vcpu *  vcpu,
unsigned long  exit_qualification,
u32  vmx_instruction_info,
bool  wr,
int  len,
gva_t *  ret 
)

Definition at line 4963 of file nested.c.

4965 {
4966  gva_t off;
4967  bool exn;
4968  struct kvm_segment s;
4969 
4970  /*
4971  * According to Vol. 3B, "Information for VM Exits Due to Instruction
4972  * Execution", on an exit, vmx_instruction_info holds most of the
4973  * addressing components of the operand. Only the displacement part
4974  * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4975  * For how an actual address is calculated from all these components,
4976  * refer to Vol. 1, "Operand Addressing".
4977  */
4978  int scaling = vmx_instruction_info & 3;
4979  int addr_size = (vmx_instruction_info >> 7) & 7;
4980  bool is_reg = vmx_instruction_info & (1u << 10);
4981  int seg_reg = (vmx_instruction_info >> 15) & 7;
4982  int index_reg = (vmx_instruction_info >> 18) & 0xf;
4983  bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4984  int base_reg = (vmx_instruction_info >> 23) & 0xf;
4985  bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4986 
4987  if (is_reg) {
4988  kvm_queue_exception(vcpu, UD_VECTOR);
4989  return 1;
4990  }
4991 
4992  /* Addr = segment_base + offset */
4993  /* offset = base + [index * scale] + displacement */
4994  off = exit_qualification; /* holds the displacement */
4995  if (addr_size == 1)
4996  off = (gva_t)sign_extend64(off, 31);
4997  else if (addr_size == 0)
4998  off = (gva_t)sign_extend64(off, 15);
4999  if (base_is_valid)
5000  off += kvm_register_read(vcpu, base_reg);
5001  if (index_is_valid)
5002  off += kvm_register_read(vcpu, index_reg) << scaling;
5003  vmx_get_segment(vcpu, &s, seg_reg);
5004 
5005  /*
5006  * The effective address, i.e. @off, of a memory operand is truncated
5007  * based on the address size of the instruction. Note that this is
5008  * the *effective address*, i.e. the address prior to accounting for
5009  * the segment's base.
5010  */
5011  if (addr_size == 1) /* 32 bit */
5012  off &= 0xffffffff;
5013  else if (addr_size == 0) /* 16 bit */
5014  off &= 0xffff;
5015 
5016  /* Checks for #GP/#SS exceptions. */
5017  exn = false;
5018  if (is_long_mode(vcpu)) {
5019  /*
5020  * The virtual/linear address is never truncated in 64-bit
5021  * mode, e.g. a 32-bit address size can yield a 64-bit virtual
5022  * address when using FS/GS with a non-zero base.
5023  */
5024  if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
5025  *ret = s.base + off;
5026  else
5027  *ret = off;
5028 
5029  *ret = vmx_get_untagged_addr(vcpu, *ret, 0);
5030  /* Long mode: #GP(0)/#SS(0) if the memory address is in a
5031  * non-canonical form. This is the only check on the memory
5032  * destination for long mode!
5033  */
5034  exn = is_noncanonical_address(*ret, vcpu);
5035  } else {
5036  /*
5037  * When not in long mode, the virtual/linear address is
5038  * unconditionally truncated to 32 bits regardless of the
5039  * address size.
5040  */
5041  *ret = (s.base + off) & 0xffffffff;
5042 
5043  /* Protected mode: apply checks for segment validity in the
5044  * following order:
5045  * - segment type check (#GP(0) may be thrown)
5046  * - usability check (#GP(0)/#SS(0))
5047  * - limit check (#GP(0)/#SS(0))
5048  */
5049  if (wr)
5050  /* #GP(0) if the destination operand is located in a
5051  * read-only data segment or any code segment.
5052  */
5053  exn = ((s.type & 0xa) == 0 || (s.type & 8));
5054  else
5055  /* #GP(0) if the source operand is located in an
5056  * execute-only code segment
5057  */
5058  exn = ((s.type & 0xa) == 8);
5059  if (exn) {
5060  kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
5061  return 1;
5062  }
5063  /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
5064  */
5065  exn = (s.unusable != 0);
5066 
5067  /*
5068  * Protected mode: #GP(0)/#SS(0) if the memory operand is
5069  * outside the segment limit. All CPUs that support VMX ignore
5070  * limit checks for flat segments, i.e. segments with base==0,
5071  * limit==0xffffffff and of type expand-up data or code.
5072  */
5073  if (!(s.base == 0 && s.limit == 0xffffffff &&
5074  ((s.type & 8) || !(s.type & 4))))
5075  exn = exn || ((u64)off + len - 1 > s.limit);
5076  }
5077  if (exn) {
5078  kvm_queue_exception_e(vcpu,
5079  seg_reg == VCPU_SREG_SS ?
5080  SS_VECTOR : GP_VECTOR,
5081  0);
5082  return 1;
5083  }
5084 
5085  return 0;
5086 }
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
Definition: vmx.c:8250
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
Definition: vmx.c:3496
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
Definition: x86.c:824
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
Definition: x86.c:731
static unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
Definition: x86.h:273
static bool is_long_mode(struct kvm_vcpu *vcpu)
Definition: x86.h:143
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
Definition: x86.h:213
Here is the call graph for this function:
Here is the caller graph for this function:
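The bit positions that get_vmx_mem_address() pulls out of vmx_instruction_info follow the VMX instruction-information format described in the SDM. Below is a minimal, illustrative userspace sketch of the same decode; the struct and helper names are invented for this example and are not part of KVM.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the VMX instruction-information field, mirroring
 * the bit positions used in get_vmx_mem_address() above. */
struct vmx_mem_operand {
	int scaling;        /* bits 1:0   - scale factor = 1 << scaling          */
	int addr_size;      /* bits 9:7   - 0 = 16-bit, 1 = 32-bit, 2 = 64-bit   */
	int is_reg;         /* bit 10     - 1 = register operand, no memory      */
	int seg_reg;        /* bits 17:15 - segment register (0 = ES ... 5 = GS) */
	int index_reg;      /* bits 21:18 - index GPR number                     */
	int index_valid;    /* bit 22     - 0 means the index *is* valid         */
	int base_reg;       /* bits 26:23 - base GPR number                      */
	int base_valid;     /* bit 27     - 0 means the base *is* valid          */
};

static struct vmx_mem_operand decode_instr_info(uint32_t info)
{
	struct vmx_mem_operand op = {
		.scaling     = info & 3,
		.addr_size   = (info >> 7) & 7,
		.is_reg      = !!(info & (1u << 10)),
		.seg_reg     = (info >> 15) & 7,
		.index_reg   = (info >> 18) & 0xf,
		.index_valid = !(info & (1u << 22)),
		.base_reg    = (info >> 23) & 0xf,
		.base_valid  = !(info & (1u << 27)),
	};
	return op;
}

int main(void)
{
	/* Hypothetical encoding: 64-bit address size, base in GPR 0 (RAX),
	 * index marked invalid. */
	uint32_t info = (2u << 7) | (1u << 22) | (0u << 23);
	struct vmx_mem_operand op = decode_instr_info(info);

	printf("addr_size=%d base_valid=%d base_reg=%d index_valid=%d\n",
	       op.addr_size, op.base_valid, op.base_reg, op.index_valid);
	return 0;
}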

◆ handle_invept()

static int handle_invept ( struct kvm_vcpu *  vcpu)
static

Definition at line 5702 of file nested.c.

5703 {
5704  struct vcpu_vmx *vmx = to_vmx(vcpu);
5705  u32 vmx_instruction_info, types;
5706  unsigned long type, roots_to_free;
5707  struct kvm_mmu *mmu;
5708  gva_t gva;
5709  struct x86_exception e;
5710  struct {
5711  u64 eptp, gpa;
5712  } operand;
5713  int i, r, gpr_index;
5714 
5715  if (!(vmx->nested.msrs.secondary_ctls_high &
5716  SECONDARY_EXEC_ENABLE_EPT) ||
5717  !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5718  kvm_queue_exception(vcpu, UD_VECTOR);
5719  return 1;
5720  }
5721 
5722  if (!nested_vmx_check_permission(vcpu))
5723  return 1;
5724 
5725  vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5726  gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5727  type = kvm_register_read(vcpu, gpr_index);
5728 
5729  types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
5730 
5731  if (type >= 32 || !(types & (1 << type)))
5732  return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5733 
5734  /* According to the Intel VMX instruction reference, the memory
5735  * operand is read even if it isn't needed (e.g., for type==global)
5736  */
5737  if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5738  vmx_instruction_info, false, sizeof(operand), &gva))
5739  return 1;
5740  r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5741  if (r != X86EMUL_CONTINUE)
5742  return kvm_handle_memory_failure(vcpu, r, &e);
5743 
5744  /*
5745  * Nested EPT roots are always held through guest_mmu,
5746  * not root_mmu.
5747  */
5748  mmu = &vcpu->arch.guest_mmu;
5749 
5750  switch (type) {
5751  case VMX_EPT_EXTENT_CONTEXT:
5752  if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5753  return nested_vmx_fail(vcpu,
5754  VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5755 
5756  roots_to_free = 0;
5757  if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd,
5758  operand.eptp))
5759  roots_to_free |= KVM_MMU_ROOT_CURRENT;
5760 
5761  for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5762  if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
5763  mmu->prev_roots[i].pgd,
5764  operand.eptp))
5765  roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5766  }
5767  break;
5768  case VMX_EPT_EXTENT_GLOBAL:
5769  roots_to_free = KVM_MMU_ROOTS_ALL;
5770  break;
5771  default:
5772  BUG();
5773  break;
5774  }
5775 
5776  if (roots_to_free)
5777  kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
5778 
5779  return nested_vmx_succeed(vcpu);
5780 }
#define X86EMUL_CONTINUE
Definition: kvm_emulate.h:81
struct nested_vmx_msrs msrs
Definition: vmx.h:234
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
Definition: nested.c:152
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
Definition: nested.c:188
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
Definition: nested.c:381
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
Definition: nested.c:4963
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
Definition: nested.c:2723
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
Definition: nested.c:3391
static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
Definition: vmx.h:681
static int vmx_get_instr_info_reg2(u32 vmx_instr_info)
Definition: vmx.h:742
static __always_inline u32 vmcs_read32(unsigned long field)
Definition: vmx_ops.h:161
int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
Definition: x86.c:7572
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e)
Definition: x86.c:13588
Here is the call graph for this function:
Here is the caller graph for this function:
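The `types` computation in handle_invept() turns the INVEPT capability bits of IA32_VMX_EPT_VPID_CAP into a tiny bitmap indexed by INVEPT type (1 = single-context, 2 = all-context). The worked sketch below spells the constants out for illustration (in the kernel they come from vmx.h); the caps value in main() is hypothetical.

#include <stdint.h>
#include <assert.h>

#define VMX_EPT_EXTENT_SHIFT     24
#define VMX_EPT_EXTENT_CONTEXT   1   /* single-context invalidation */
#define VMX_EPT_EXTENT_GLOBAL    2   /* all-context invalidation    */

/* Mirror of the validity test in handle_invept(): bit N of 'types' must be
 * set for INVEPT type N to be allowed. */
static int invept_type_ok(uint64_t ept_caps, unsigned long type)
{
	uint32_t types = (ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	return type < 32 && (types & (1u << type));
}

int main(void)
{
	/* Hypothetical caps value advertising both INVEPT types
	 * (bits 25 and 26 of IA32_VMX_EPT_VPID_CAP). */
	uint64_t ept_caps = (1ull << 25) | (1ull << 26);

	assert(invept_type_ok(ept_caps, VMX_EPT_EXTENT_CONTEXT));
	assert(invept_type_ok(ept_caps, VMX_EPT_EXTENT_GLOBAL));
	assert(!invept_type_ok(ept_caps, 0));   /* type 0 is never advertised here */
	assert(!invept_type_ok(ept_caps, 40));  /* out of range */
	return 0;
}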

◆ handle_invvpid()

static int handle_invvpid ( struct kvm_vcpu *  vcpu)
static

Definition at line 5782 of file nested.c.

5783 {
5784  struct vcpu_vmx *vmx = to_vmx(vcpu);
5785  u32 vmx_instruction_info;
5786  unsigned long type, types;
5787  gva_t gva;
5788  struct x86_exception e;
5789  struct {
5790  u64 vpid;
5791  u64 gla;
5792  } operand;
5793  u16 vpid02;
5794  int r, gpr_index;
5795 
5796  if (!(vmx->nested.msrs.secondary_ctls_high &
5797  SECONDARY_EXEC_ENABLE_VPID) ||
5798  !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5799  kvm_queue_exception(vcpu, UD_VECTOR);
5800  return 1;
5801  }
5802 
5803  if (!nested_vmx_check_permission(vcpu))
5804  return 1;
5805 
5806  vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5807  gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5808  type = kvm_register_read(vcpu, gpr_index);
5809 
5810  types = (vmx->nested.msrs.vpid_caps &
5811  VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5812 
5813  if (type >= 32 || !(types & (1 << type)))
5814  return nested_vmx_fail(vcpu,
5815  VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5816 
5817  /* according to the intel vmx instruction reference, the memory
5818  * operand is read even if it isn't needed (e.g., for type==global)
5819  */
5820  if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5821  vmx_instruction_info, false, sizeof(operand), &gva))
5822  return 1;
5823  r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5824  if (r != X86EMUL_CONTINUE)
5825  return kvm_handle_memory_failure(vcpu, r, &e);
5826 
5827  if (operand.vpid >> 16)
5828  return nested_vmx_fail(vcpu,
5829  VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5830 
5831  vpid02 = nested_get_vpid02(vcpu);
5832  switch (type) {
5833  case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5834  /*
5835  * LAM doesn't apply to addresses that are inputs to TLB
5836  * invalidation.
5837  */
5838  if (!operand.vpid ||
5839  is_noncanonical_address(operand.gla, vcpu))
5840  return nested_vmx_fail(vcpu,
5841  VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5842  vpid_sync_vcpu_addr(vpid02, operand.gla);
5843  break;
5844  case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5845  case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5846  if (!operand.vpid)
5847  return nested_vmx_fail(vcpu,
5848  VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5849  vpid_sync_context(vpid02);
5850  break;
5851  case VMX_VPID_EXTENT_ALL_CONTEXT:
5852  vpid_sync_context(vpid02);
5853  break;
5854  default:
5855  WARN_ON_ONCE(1);
5856  return kvm_skip_emulated_instruction(vcpu);
5857  }
5858 
5859  /*
5860  * Sync the shadow page tables if EPT is disabled, L1 is invalidating
5861  * linear mappings for L2 (tagged with L2's VPID). Free all guest
5862  * roots as VPIDs are not tracked in the MMU role.
5863  *
5864  * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5865  * an MMU when EPT is disabled.
5866  *
5867  * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR.
5868  */
5869  if (!enable_ept)
5870  kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5871 
5872  return nested_vmx_succeed(vcpu);
5873 }
bool __read_mostly enable_ept
Definition: vmx.c:91
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
Definition: mmu.c:3643
static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
Definition: nested.h:64
#define VMX_VPID_EXTENT_SUPPORTED_MASK
Definition: nested.c:33
static void vpid_sync_vcpu_addr(int vpid, gva_t addr)
Definition: vmx_ops.h:345
static void vpid_sync_context(int vpid)
Definition: vmx_ops.h:337
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
Definition: x86.c:8916
Here is the call graph for this function:
Here is the caller graph for this function:
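As with INVEPT, the INVVPID descriptor read by handle_invvpid() is a 128-bit memory operand: a 16-bit VPID zero-extended to 64 bits, followed by a 64-bit linear address that is only consulted for the individual-address type. The sketch below illustrates just the operand checks (the canonicality test on the address is omitted); struct and function names are invented for the example.

#include <stdint.h>
#include <assert.h>

#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR   0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT    1
#define VMX_VPID_EXTENT_ALL_CONTEXT       2

/* Same layout as the local 'operand' struct in handle_invvpid(). */
struct invvpid_operand {
	uint64_t vpid;	/* only the low 16 bits may be non-zero   */
	uint64_t gla;	/* guest linear address, for type 0 only  */
};

/* Simplified mirror of the operand validation: reserved VPID bits must be
 * clear, and every type other than all-context needs a non-zero VPID. */
static int invvpid_operand_ok(const struct invvpid_operand *op, unsigned long type)
{
	if (op->vpid >> 16)
		return 0;
	if (type != VMX_VPID_EXTENT_ALL_CONTEXT && !op->vpid)
		return 0;
	return 1;
}

int main(void)
{
	struct invvpid_operand ok  = { .vpid = 1,       .gla = 0x1000 };
	struct invvpid_operand bad = { .vpid = 0x10000, .gla = 0 };

	assert(invvpid_operand_ok(&ok, VMX_VPID_EXTENT_SINGLE_CONTEXT));
	assert(!invvpid_operand_ok(&bad, VMX_VPID_EXTENT_ALL_CONTEXT));
	assert(!invvpid_operand_ok(&(struct invvpid_operand){0},
				   VMX_VPID_EXTENT_INDIVIDUAL_ADDR));
	return 0;
}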

◆ handle_vmclear()

static int handle_vmclear ( struct kvm_vcpu *  vcpu)
static

Definition at line 5323 of file nested.c.

5324 {
5325  struct vcpu_vmx *vmx = to_vmx(vcpu);
5326  u32 zero = 0;
5327  gpa_t vmptr;
5328  int r;
5329 
5330  if (!nested_vmx_check_permission(vcpu))
5331  return 1;
5332 
5333  if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5334  return r;
5335 
5336  if (!page_address_valid(vcpu, vmptr))
5337  return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5338 
5339  if (vmptr == vmx->nested.vmxon_ptr)
5340  return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
5341 
5342  if (likely(!nested_evmcs_handle_vmclear(vcpu, vmptr))) {
5343  if (vmptr == vmx->nested.current_vmptr)
5344  nested_release_vmcs12(vcpu);
5345 
5346  /*
5347  * Silently ignore memory errors on VMCLEAR, Intel's pseudocode
5348  * for VMCLEAR includes a "ensure that data for VMCS referenced
5349  * by the operand is in memory" clause that guards writes to
5350  * memory, i.e. doing nothing for I/O is architecturally valid.
5351  *
5352  * FIXME: Suppress failures if and only if no memslot is found,
5353  * i.e. exit to userspace if __copy_to_user() fails.
5354  */
5355  (void)kvm_vcpu_write_guest(vcpu,
5356  vmptr + offsetof(struct vmcs12,
5357  launch_state),
5358  &zero, sizeof(zero));
5359  }
5360 
5361  return nested_vmx_succeed(vcpu);
5362 }
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
Definition: cpuid.h:56
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len)
Definition: kvm_main.c:3470
static void nested_release_vmcs12(struct kvm_vcpu *vcpu)
Definition: nested.c:5281
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, int *ret)
Definition: nested.c:5088
static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
Definition: nested.c:248
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmfunc()

static int handle_vmfunc ( struct kvm_vcpu *  vcpu)
static

Definition at line 5908 of file nested.c.

5909 {
5910  struct vcpu_vmx *vmx = to_vmx(vcpu);
5911  struct vmcs12 *vmcs12;
5912  u32 function = kvm_rax_read(vcpu);
5913 
5914  /*
5915  * VMFUNC should never execute cleanly while L1 is active; KVM supports
5916  * VMFUNC for nested VMs, but not for L1.
5917  */
5918  if (WARN_ON_ONCE(!is_guest_mode(vcpu))) {
5919  kvm_queue_exception(vcpu, UD_VECTOR);
5920  return 1;
5921  }
5922 
5923  vmcs12 = get_vmcs12(vcpu);
5924 
5925  /*
5926  * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5927  * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5928  */
5929  if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5930  kvm_queue_exception(vcpu, UD_VECTOR);
5931  return 1;
5932  }
5933 
5934  if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5935  goto fail;
5936 
5937  switch (function) {
5938  case 0:
5939  if (nested_vmx_eptp_switching(vcpu, vmcs12))
5940  goto fail;
5941  break;
5942  default:
5943  goto fail;
5944  }
5945  return kvm_skip_emulated_instruction(vcpu);
5946 
5947 fail:
5948  /*
5949  * This is effectively a reflected VM-Exit, as opposed to a synthesized
5950  * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5951  * EXIT_REASON_VMFUNC as the exit reason.
5952  */
5953  nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
5954  vmx_get_intr_info(vcpu),
5955  vmx_get_exit_qual(vcpu));
5956  return 1;
5957 }
static bool is_guest_mode(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
Definition: nested.h:205
union vmx_exit_reason exit_reason
Definition: vmx.h:320
u64 vm_function_control
Definition: vmcs12.h:69
u32 full
Definition: vmx.h:93
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
Definition: nested.c:4767
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:5875
static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
Definition: vmx.h:691
Here is the call graph for this function:
Here is the caller graph for this function:
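VMFUNC takes the leaf number in RAX, and handle_vmfunc() requires the leaf to be architecturally defined (only leaf 0, EPTP switching, exists today) and enabled by L1 in the VM-function controls. A hedged sketch of that gate, with names invented for the example:

#include <stdint.h>
#include <assert.h>

/* Leaf 0 (EPTP switching) is the only VM function currently defined. */
#define VMFUNC_EPTP_SWITCHING   0

/* Mirror of the checks in handle_vmfunc(): the leaf index must be < 64 and
 * the corresponding bit must be set in vmcs12's VM-function controls. */
static int vmfunc_leaf_enabled(uint64_t vm_function_control, uint32_t function)
{
	if (function > 63)
		return 0;
	return !!(vm_function_control & (1ull << function));
}

int main(void)
{
	uint64_t ctl = 1ull << VMFUNC_EPTP_SWITCHING;	/* L1 enabled leaf 0 */

	assert(vmfunc_leaf_enabled(ctl, 0));
	assert(!vmfunc_leaf_enabled(ctl, 1));	/* leaf not enabled -> fail path */
	assert(!vmfunc_leaf_enabled(ctl, 64));	/* out of range */
	return 0;
}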

◆ handle_vmlaunch()

static int handle_vmlaunch ( struct kvm_vcpu *  vcpu)
static

Definition at line 5365 of file nested.c.

5366 {
5367  return nested_vmx_run(vcpu, true);
5368 }
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
Definition: nested.c:3592
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmptrld()

static int handle_vmptrld ( struct kvm_vcpu *  vcpu)
static

Definition at line 5604 of file nested.c.

5605 {
5606  struct vcpu_vmx *vmx = to_vmx(vcpu);
5607  gpa_t vmptr;
5608  int r;
5609 
5610  if (!nested_vmx_check_permission(vcpu))
5611  return 1;
5612 
5613  if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5614  return r;
5615 
5616  if (!page_address_valid(vcpu, vmptr))
5617  return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5618 
5619  if (vmptr == vmx->nested.vmxon_ptr)
5620  return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
5621 
5622  /* Forbid normal VMPTRLD if Enlightened version was used */
5623  if (nested_vmx_is_evmptr12_valid(vmx))
5624  return 1;
5625 
5626  if (vmx->nested.current_vmptr != vmptr) {
5627  struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5628  struct vmcs_hdr hdr;
5629 
5630  if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
5631  /*
5632  * Reads from an unbacked page return all 1s,
5633  * which means that the 32 bits located at the
5634  * given physical address won't match the required
5635  * VMCS12_REVISION identifier.
5636  */
5637  return nested_vmx_fail(vcpu,
5638  VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5639  }
5640 
5641  if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
5642  offsetof(struct vmcs12, hdr),
5643  sizeof(hdr))) {
5644  return nested_vmx_fail(vcpu,
5645  VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5646  }
5647 
5648  if (hdr.revision_id != VMCS12_REVISION ||
5649  (hdr.shadow_vmcs &&
5650  !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5651  return nested_vmx_fail(vcpu,
5652  VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5653  }
5654 
5655  nested_release_vmcs12(vcpu);
5656 
5657  /*
5658  * Load VMCS12 from guest memory since it is not already
5659  * cached.
5660  */
5661  if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5662  VMCS12_SIZE)) {
5663  return nested_vmx_fail(vcpu,
5664  VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5665  }
5666 
5667  set_current_vmptr(vmx, vmptr);
5668  }
5669 
5670  return nested_vmx_succeed(vcpu);
5671 }
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
Definition: kvm_main.c:3608
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len)
Definition: kvm_main.c:3532
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned int offset, unsigned long len)
Definition: kvm_main.c:3578
static bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
Definition: nested.h:126
struct gfn_to_hva_cache vmcs12_cache
Definition: vmx.h:145
Definition: vmcs.h:16
#define VMCS12_REVISION
Definition: vmcs12.h:198
static bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
Definition: hyperv.h:69
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
Definition: nested.c:5590
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmptrst()

static int handle_vmptrst ( struct kvm_vcpu *  vcpu)
static

Definition at line 5674 of file nested.c.

5675 {
5676  unsigned long exit_qual = vmx_get_exit_qual(vcpu);
5677  u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5678  gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5679  struct x86_exception e;
5680  gva_t gva;
5681  int r;
5682 
5683  if (!nested_vmx_check_permission(vcpu))
5684  return 1;
5685 
5686  if (unlikely(nested_vmx_is_evmptr12_valid(to_vmx(vcpu))))
5687  return 1;
5688 
5689  if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5690  true, sizeof(gpa_t), &gva))
5691  return 1;
5692  /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5693  r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5694  sizeof(gpa_t), &e);
5695  if (r != X86EMUL_CONTINUE)
5696  return kvm_handle_memory_failure(vcpu, r, &e);
5697 
5698  return nested_vmx_succeed(vcpu);
5699 }
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
Definition: x86.c:7651
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmread()

static int handle_vmread ( struct kvm_vcpu *  vcpu)
static

Definition at line 5377 of file nested.c.

5378 {
5379  struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5380  : get_vmcs12(vcpu);
5381  unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5382  u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5383  struct vcpu_vmx *vmx = to_vmx(vcpu);
5384  struct x86_exception e;
5385  unsigned long field;
5386  u64 value;
5387  gva_t gva = 0;
5388  short offset;
5389  int len, r;
5390 
5391  if (!nested_vmx_check_permission(vcpu))
5392  return 1;
5393 
5394  /* Decode instruction info and find the field to read */
5395  field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5396 
5397  if (!nested_vmx_is_evmptr12_valid(vmx)) {
5398  /*
5399  * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5400  * any VMREAD sets the ALU flags for VMfailInvalid.
5401  */
5402  if (vmx->nested.current_vmptr == INVALID_GPA ||
5403  (is_guest_mode(vcpu) &&
5404  get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5405  return nested_vmx_failInvalid(vcpu);
5406 
5407  offset = get_vmcs12_field_offset(field);
5408  if (offset < 0)
5409  return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5410 
5411  if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5412  copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5413 
5414  /* Read the field, zero-extended to a u64 value */
5415  value = vmcs12_read_any(vmcs12, field, offset);
5416  } else {
5417  /*
5418  * Hyper-V TLFS (as of 6.0b) explicitly states, that while an
5419  * enlightened VMCS is active VMREAD/VMWRITE instructions are
5420  * unsupported. Unfortunately, certain versions of Windows 11
5421  * don't comply with this requirement which is not enforced in
5422  * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
5423  * workaround, as misbehaving guests will panic on VM-Fail.
5424  * Note, enlightened VMCS is incompatible with shadow VMCS so
5425  * all VMREADs from L2 should go to L1.
5426  */
5427  if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5428  return nested_vmx_failInvalid(vcpu);
5429 
5430  offset = evmcs_field_offset(field, NULL);
5431  if (offset < 0)
5432  return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5433 
5434  /* Read the field, zero-extended to a u64 value */
5435  value = evmcs_read_any(nested_vmx_evmcs(vmx), field, offset);
5436  }
5437 
5438  /*
5439  * Now copy part of this value to register or memory, as requested.
5440  * Note that the number of bits actually copied is 32 or 64 depending
5441  * on the guest's mode (32 or 64 bit), not on the given field's length.
5442  */
5443  if (instr_info & BIT(10)) {
5444  kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
5445  } else {
5446  len = is_64_bit_mode(vcpu) ? 8 : 4;
5447  if (get_vmx_mem_address(vcpu, exit_qualification,
5448  instr_info, true, len, &gva))
5449  return 1;
5450  /* _system ok, nested_vmx_check_permission has verified cpl=0 */
5451  r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5452  if (r != X86EMUL_CONTINUE)
5453  return kvm_handle_memory_failure(vcpu, r, &e);
5454  }
5455 
5456  return nested_vmx_succeed(vcpu);
5457 }
static __always_inline int evmcs_field_offset(unsigned long field, u16 *clean_field)
Definition: hyperv_evmcs.h:129
static u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs, unsigned long field, u16 offset)
Definition: hyperv_evmcs.h:154
static struct vmcs12 * get_shadow_vmcs12(struct kvm_vcpu *vcpu)
Definition: nested.h:45
static short get_vmcs12_field_offset(unsigned long field)
Definition: vmcs12.h:366
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
Definition: nested.c:160
static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:4343
static bool is_vmcs12_ext_field(unsigned long field)
Definition: nested.c:4251
static void kvm_register_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
Definition: x86.h:280
static bool is_64_bit_mode(struct kvm_vcpu *vcpu)
Definition: x86.h:152
Here is the call graph for this function:
Here is the caller graph for this function:
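handle_vmread() and handle_vmwrite() reuse the same VMX instruction-information field as get_vmx_mem_address(): bit 10 selects the register form of the data operand, bits 3:6 name that register, and bits 28:31 name the register holding the VMCS field encoding. A small illustrative decode (helper names are assumptions for this sketch, not KVM symbols):

#include <stdint.h>
#include <stdio.h>

/* Bit layout used by handle_vmread()/handle_vmwrite() above. */
static int vmx_instr_info_is_reg_operand(uint32_t info)
{
	return !!(info & (1u << 10));
}

static int vmx_instr_info_reg1(uint32_t info)	/* data register            */
{
	return (info >> 3) & 0xf;
}

static int vmx_instr_info_reg2(uint32_t info)	/* field-encoding register   */
{
	return (info >> 28) & 0xf;
}

int main(void)
{
	/* Hypothetical encoding: register form, data in GPR 3 (RBX),
	 * field encoding in GPR 1 (RCX). */
	uint32_t info = (1u << 10) | (3u << 3) | (1u << 28);

	printf("reg form=%d reg1=%d reg2=%d\n",
	       vmx_instr_info_is_reg_operand(info),
	       vmx_instr_info_reg1(info),
	       vmx_instr_info_reg2(info));
	return 0;
}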

◆ handle_vmresume()

static int handle_vmresume ( struct kvm_vcpu *  vcpu)
static

Definition at line 5371 of file nested.c.

5372 {
5373 
5374  return nested_vmx_run(vcpu, false);
5375 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmwrite()

static int handle_vmwrite ( struct kvm_vcpu *  vcpu)
static

Definition at line 5483 of file nested.c.

5484 {
5485  struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5486  : get_vmcs12(vcpu);
5487  unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5488  u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5489  struct vcpu_vmx *vmx = to_vmx(vcpu);
5490  struct x86_exception e;
5491  unsigned long field;
5492  short offset;
5493  gva_t gva;
5494  int len, r;
5495 
5496  /*
5497  * The value to write might be 32 or 64 bits, depending on L1's long
5498  * mode, and eventually we need to write that into a field of several
5499  * possible lengths. The code below first zero-extends the value to 64
5500  * bit (value), and then copies only the appropriate number of
5501  * bits into the vmcs12 field.
5502  */
5503  u64 value = 0;
5504 
5505  if (!nested_vmx_check_permission(vcpu))
5506  return 1;
5507 
5508  /*
5509  * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5510  * any VMWRITE sets the ALU flags for VMfailInvalid.
5511  */
5512  if (vmx->nested.current_vmptr == INVALID_GPA ||
5513  (is_guest_mode(vcpu) &&
5514  get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5515  return nested_vmx_failInvalid(vcpu);
5516 
5517  if (instr_info & BIT(10))
5518  value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
5519  else {
5520  len = is_64_bit_mode(vcpu) ? 8 : 4;
5521  if (get_vmx_mem_address(vcpu, exit_qualification,
5522  instr_info, false, len, &gva))
5523  return 1;
5524  r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5525  if (r != X86EMUL_CONTINUE)
5526  return kvm_handle_memory_failure(vcpu, r, &e);
5527  }
5528 
5529  field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5530 
5531  offset = get_vmcs12_field_offset(field);
5532  if (offset < 0)
5533  return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5534 
5535  /*
5536  * If the vCPU supports "VMWRITE to any supported field in the
5537  * VMCS," then the "read-only" fields are actually read/write.
5538  */
5539  if (vmcs_field_readonly(field) &&
5540  !nested_cpu_has_vmwrite_any_field(vcpu))
5541  return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5542 
5543  /*
5544  * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5545  * vmcs12, else we may crush a field or consume a stale value.
5546  */
5547  if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5548  copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5549 
5550  /*
5551  * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5552  * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5553  * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5554  * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5555  * from L1 will return a different value than VMREAD from L2 (L1 sees
5556  * the stripped down value, L2 sees the full value as stored by KVM).
5557  */
5558  if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5559  value &= 0x1f0ff;
5560 
5561  vmcs12_write_any(vmcs12, field, offset, value);
5562 
5563  /*
5564  * Do not track vmcs12 dirty-state if in guest-mode as we actually
5565  * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5566  * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5567  * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5568  */
5569  if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5570  /*
5571  * L1 can read these fields without exiting, ensure the
5572  * shadow VMCS is up-to-date.
5573  */
5574  if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5575  preempt_disable();
5576  vmcs_load(vmx->vmcs01.shadow_vmcs);
5577 
5578  __vmcs_writel(field, value);
5579 
5580  vmcs_clear(vmx->vmcs01.shadow_vmcs);
5581  vmcs_load(vmx->loaded_vmcs->vmcs);
5582  preempt_enable();
5583  }
5584  vmx->nested.dirty_vmcs12 = true;
5585  }
5586 
5587  return nested_vmx_succeed(vcpu);
5588 }
static bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
Definition: nested.h:109
bool dirty_vmcs12
Definition: vmx.h:152
static int vmcs_field_readonly(unsigned long field)
Definition: vmcs.h:180
static bool is_shadow_field_ro(unsigned long field)
Definition: nested.c:5471
static bool is_shadow_field_rw(unsigned long field)
Definition: nested.c:5459
Here is the call graph for this function:
Here is the caller graph for this function:
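The 0x1f0ff mask that handle_vmwrite() applies to access-rights writes keeps the architecturally defined AR-byte bits (type, S, DPL and P in bits 0-7, plus AVL, L, D/B, G and "unusable" in bits 12-16) and drops reserved bits 8-11 and everything above bit 16, matching what some physical CPUs do on VMWRITE. A tiny illustration with a made-up guest value:

#include <stdint.h>
#include <assert.h>

#define VMX_AR_KEEP_MASK	0x1f0ffu   /* bits preserved on AR-byte VMWRITE */

int main(void)
{
	/* Hypothetical guest value with reserved bits 8-11 and a stray high
	 * bit set; only the defined AR bits survive the write. */
	uint32_t raw     = 0x00ffffffu;
	uint32_t written = raw & VMX_AR_KEEP_MASK;

	assert(written == 0x1f0ffu);          /* reserved bits stripped   */
	assert((written & 0xf) == 0xf);       /* segment type preserved   */
	assert(written & (1u << 16));         /* "unusable" bit preserved */
	return 0;
}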

◆ handle_vmxoff()

static int handle_vmxoff ( struct kvm_vcpu *  vcpu)
static

Definition at line 5309 of file nested.c.

5310 {
5311  if (!nested_vmx_check_permission(vcpu))
5312  return 1;
5313 
5314  free_nested(vcpu);
5315 
5316  if (kvm_apic_has_pending_init_or_sipi(vcpu))
5317  kvm_make_request(KVM_REQ_EVENT, vcpu);
5318 
5319  return nested_vmx_succeed(vcpu);
5320 }
static bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
Definition: lapic.h:231
static void free_nested(struct kvm_vcpu *vcpu)
Definition: nested.c:323
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_vmxon()

static int handle_vmxon ( struct kvm_vcpu *  vcpu)
static

Definition at line 5190 of file nested.c.

5191 {
5192  int ret;
5193  gpa_t vmptr;
5194  uint32_t revision;
5195  struct vcpu_vmx *vmx = to_vmx(vcpu);
5196  const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
5197  | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
5198 
5199  /*
5200  * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter
5201  * the guest and so cannot rely on hardware to perform the check,
5202  * which has higher priority than VM-Exit (see Intel SDM's pseudocode
5203  * for VMXON).
5204  *
5205  * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
5206  * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't
5207  * force any of the relevant guest state. For a restricted guest, KVM
5208  * does force CR0.PE=1, but only to also force VM86 in order to emulate
5209  * Real Mode, and so there's no need to check CR0.PE manually.
5210  */
5211  if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
5212  kvm_queue_exception(vcpu, UD_VECTOR);
5213  return 1;
5214  }
5215 
5216  /*
5217  * The CPL is checked for "not in VMX operation" and for "in VMX root",
5218  * and has higher priority than the VM-Fail due to being post-VMXON,
5219  * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root,
5220  * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
5221  * from L2 to L1, i.e. there's no need to check for the vCPU being in
5222  * VMX non-root.
5223  *
5224  * Forwarding the VM-Exit unconditionally, i.e. without performing the
5225  * #UD checks (see above), is functionally ok because KVM doesn't allow
5226  * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's
5227  * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are
5228  * missed by hardware due to shadowing CR0 and/or CR4.
5229  */
5230  if (vmx_get_cpl(vcpu)) {
5231  kvm_inject_gp(vcpu, 0);
5232  return 1;
5233  }
5234 
5235  if (vmx->nested.vmxon)
5236  return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
5237 
5238  /*
5239  * Invalid CR0/CR4 generates #GP. These checks are performed if and
5240  * only if the vCPU isn't already in VMX operation, i.e. effectively
5241  * have lower priority than the VM-Fail above.
5242  */
5243  if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
5244  !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
5245  kvm_inject_gp(vcpu, 0);
5246  return 1;
5247  }
5248 
5249  if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5250  != VMXON_NEEDED_FEATURES) {
5251  kvm_inject_gp(vcpu, 0);
5252  return 1;
5253  }
5254 
5255  if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
5256  return ret;
5257 
5258  /*
5259  * SDM 3: 24.11.5
5260  * The first 4 bytes of VMXON region contain the supported
5261  * VMCS revision identifier
5262  *
5263  * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
5264  * which replaces physical address width with 32
5265  */
5266  if (!page_address_valid(vcpu, vmptr))
5267  return nested_vmx_failInvalid(vcpu);
5268 
5269  if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
5270  revision != VMCS12_REVISION)
5271  return nested_vmx_failInvalid(vcpu);
5272 
5273  vmx->nested.vmxon_ptr = vmptr;
5274  ret = enter_vmx_operation(vcpu);
5275  if (ret)
5276  return ret;
5277 
5278  return nested_vmx_succeed(vcpu);
5279 }
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
Definition: kvm_main.c:3346
#define nested_host_cr4_valid
Definition: nested.h:290
static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
Definition: nested.h:271
u64 msr_ia32_feature_control
Definition: vmx.h:351
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
Definition: nested.c:5139
int vmx_get_cpl(struct kvm_vcpu *vcpu)
Definition: vmx.c:3543
Here is the call graph for this function:
Here is the caller graph for this function:
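The VMXON_NEEDED_FEATURES test in handle_vmxon() requires IA32_FEATURE_CONTROL to be locked with VMX enabled outside SMX, i.e. both bits must be set, not just one. A compact sketch of the same predicate, with the bit positions spelled out per the SDM (bit 0 = lock, bit 2 = enable VMX outside SMX):

#include <stdint.h>
#include <assert.h>

#define FEAT_CTL_LOCKED                     (1ull << 0)
#define FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX    (1ull << 2)

/* Mirror of the handle_vmxon() check: both bits must be present. */
static int vmxon_allowed(uint64_t feature_control)
{
	const uint64_t needed = FEAT_CTL_LOCKED |
				FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;

	return (feature_control & needed) == needed;
}

int main(void)
{
	assert(vmxon_allowed(FEAT_CTL_LOCKED | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX));
	assert(!vmxon_allowed(FEAT_CTL_LOCKED));	/* locked but VMX disabled */
	assert(!vmxon_allowed(0));			/* MSR not locked at all   */
	return 0;
}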

◆ init_vmcs_shadow_fields()

static void init_vmcs_shadow_fields ( void  )
static

Definition at line 69 of file nested.c.

70 {
71  int i, j;
72 
73  memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
74  memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
75 
76  for (i = j = 0; i < max_shadow_read_only_fields; i++) {
77  struct shadow_vmcs_field entry = shadow_read_only_fields[i];
78  u16 field = entry.encoding;
79 
80  if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
81  (i + 1 == max_shadow_read_only_fields ||
82  shadow_read_only_fields[i + 1].encoding != field + 1))
83  pr_err("Missing field from shadow_read_only_field %x\n",
84  field + 1);
85 
86  clear_bit(field, vmx_vmread_bitmap);
87  if (field & 1)
88 #ifdef CONFIG_X86_64
89  continue;
90 #else
91  entry.offset += sizeof(u32);
92 #endif
93  shadow_read_only_fields[j++] = entry;
94  }
95  max_shadow_read_only_fields = j;
96 
97  for (i = j = 0; i < max_shadow_read_write_fields; i++) {
98  struct shadow_vmcs_field entry = shadow_read_write_fields[i];
99  u16 field = entry.encoding;
100 
101  if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
102  (i + 1 == max_shadow_read_write_fields ||
103  shadow_read_write_fields[i + 1].encoding != field + 1))
104  pr_err("Missing field from shadow_read_write_field %x\n",
105  field + 1);
106 
107  WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
108  field <= GUEST_TR_AR_BYTES,
109  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
110 
111  /*
112  * PML and the preemption timer can be emulated, but the
113  * processor cannot vmwrite to fields that don't exist
114  * on bare metal.
115  */
116  switch (field) {
117  case GUEST_PML_INDEX:
118  if (!cpu_has_vmx_pml())
119  continue;
120  break;
121  case VMX_PREEMPTION_TIMER_VALUE:
122  if (!cpu_has_vmx_preemption_timer())
123  continue;
124  break;
125  case GUEST_INTR_STATUS:
126  if (!cpu_has_vmx_apicv())
127  continue;
128  break;
129  default:
130  break;
131  }
132 
133  clear_bit(field, vmx_vmwrite_bitmap);
134  clear_bit(field, vmx_vmread_bitmap);
135  if (field & 1)
136 #ifdef CONFIG_X86_64
137  continue;
138 #else
139  entry.offset += sizeof(u32);
140 #endif
141  shadow_read_write_fields[j++] = entry;
142  }
143  max_shadow_read_write_fields = j;
144 }
static bool cpu_has_vmx_apicv(void)
Definition: capabilities.h:276
static bool cpu_has_vmx_pml(void)
Definition: capabilities.h:247
static bool cpu_has_vmx_preemption_timer(void)
Definition: capabilities.h:88
vmcs_field_width
Definition: vmcs.h:166
@ VMCS_FIELD_WIDTH_U64
Definition: vmcs.h:168
#define vmx_vmwrite_bitmap
Definition: nested.c:49
#define vmx_vmread_bitmap
Definition: nested.c:48
Here is the call graph for this function:
Here is the caller graph for this function:
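Each VMCS field encoding indexes one bit in the 4-KiB VMREAD/VMWRITE bitmaps: a set bit means L1's access to that field is intercepted, so init_vmcs_shadow_fields() starts from all-ones and clears the bits for fields that may be satisfied by the shadow VMCS. A minimal userspace sketch of that bitmap handling; the simple bit helpers stand in for the kernel's clear_bit()/test_bit(), and the field encoding used in main() is just an example.

#include <stdint.h>
#include <string.h>
#include <assert.h>

#define PAGE_SIZE	4096

static uint8_t vmread_bitmap[PAGE_SIZE];	/* one bit per field encoding */

static void bitmap_clear_bit(uint8_t *map, unsigned int bit)
{
	map[bit / 8] &= ~(1u << (bit % 8));
}

static int bitmap_test_bit(const uint8_t *map, unsigned int bit)
{
	return !!(map[bit / 8] & (1u << (bit % 8)));
}

int main(void)
{
	/* Hypothetical shadowed field encoding (a 16-bit guest field). */
	unsigned int field = 0x0800;

	/* Start fully intercepted, like the memset(..., 0xff, ...) above. */
	memset(vmread_bitmap, 0xff, sizeof(vmread_bitmap));

	/* Let the CPU satisfy VMREADs of this field from the shadow VMCS. */
	bitmap_clear_bit(vmread_bitmap, field);

	assert(!bitmap_test_bit(vmread_bitmap, field));      /* not intercepted   */
	assert(bitmap_test_bit(vmread_bitmap, field + 2));   /* still intercepted */
	return 0;
}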

◆ is_bitwise_subset()

static bool is_bitwise_subset ( u64  superset,
u64  subset,
u64  mask 
)
static

Definition at line 1221 of file nested.c.

1222 {
1223  superset &= mask;
1224  subset &= mask;
1225 
1226  return (superset | subset) == superset;
1227 }
Here is the caller graph for this function:
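The subset test relies on the identity that `subset` adds no new bits to `superset` exactly when OR-ing them changes nothing. A short worked example (the function body is copied from the listing above, the main() is illustrative):

#include <stdint.h>
#include <assert.h>

static int is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

int main(void)
{
	/* 0b0101 contains 0b0001, but not 0b0010. */
	assert(is_bitwise_subset(0x5, 0x1, ~0ull));
	assert(!is_bitwise_subset(0x5, 0x2, ~0ull));

	/* Bits outside the mask are ignored entirely. */
	assert(is_bitwise_subset(0x5, 0xf1, 0x0f));
	return 0;
}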

◆ is_shadow_field_ro()

static bool is_shadow_field_ro ( unsigned long  field)
static

Definition at line 5471 of file nested.c.

5472 {
5473  switch (field) {
5474 #define SHADOW_FIELD_RO(x, y) case x:
5475 #include "vmcs_shadow_fields.h"
5476  return true;
5477  default:
5478  break;
5479  }
5480  return false;
5481 }
Here is the caller graph for this function:

◆ is_shadow_field_rw()

static bool is_shadow_field_rw ( unsigned long  field)
static

Definition at line 5459 of file nested.c.

5460 {
5461  switch (field) {
5462 #define SHADOW_FIELD_RW(x, y) case x:
5463 #include "vmcs_shadow_fields.h"
5464  return true;
5465  default:
5466  break;
5467  }
5468  return false;
5469 }
Here is the caller graph for this function:
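Both predicates above are generated with the classic X-macro trick: vmcs_shadow_fields.h lists every shadowed field as SHADOW_FIELD_RO(...)/SHADOW_FIELD_RW(...) invocations, and each includer defines those macros to whatever per-field expansion it needs (a case label here, a table initializer in init_vmcs_shadow_fields()). The kernel re-includes the header for each expansion; the self-contained sketch below uses a list macro instead, which is a variation of the same idea, and the field list is made up for the example.

#include <stdio.h>

/* Stand-in for vmcs_shadow_fields.h: one macro invocation per field. */
#define MY_FIELD_LIST(X)	\
	X(0x0800, guest_es_selector)	\
	X(0x6800, guest_cr0)

/* First expansion: build case labels, like is_shadow_field_rw() above. */
static int is_listed_field(unsigned long field)
{
	switch (field) {
#define AS_CASE(enc, name)	case enc:
	MY_FIELD_LIST(AS_CASE)
#undef AS_CASE
		return 1;
	default:
		return 0;
	}
}

/* Second expansion of the same list: build a name table. */
static const char *field_names[] = {
#define AS_NAME(enc, name)	#name,
	MY_FIELD_LIST(AS_NAME)
#undef AS_NAME
};

int main(void)
{
	printf("0x6800 listed: %d, first field: %s\n",
	       is_listed_field(0x6800), field_names[0]);
	return 0;
}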

◆ is_vmcs12_ext_field()

static bool is_vmcs12_ext_field ( unsigned long  field)
static

Definition at line 4251 of file nested.c.

4252 {
4253  switch (field) {
4254  case GUEST_ES_SELECTOR:
4255  case GUEST_CS_SELECTOR:
4256  case GUEST_SS_SELECTOR:
4257  case GUEST_DS_SELECTOR:
4258  case GUEST_FS_SELECTOR:
4259  case GUEST_GS_SELECTOR:
4260  case GUEST_LDTR_SELECTOR:
4261  case GUEST_TR_SELECTOR:
4262  case GUEST_ES_LIMIT:
4263  case GUEST_CS_LIMIT:
4264  case GUEST_SS_LIMIT:
4265  case GUEST_DS_LIMIT:
4266  case GUEST_FS_LIMIT:
4267  case GUEST_GS_LIMIT:
4268  case GUEST_LDTR_LIMIT:
4269  case GUEST_TR_LIMIT:
4270  case GUEST_GDTR_LIMIT:
4271  case GUEST_IDTR_LIMIT:
4272  case GUEST_ES_AR_BYTES:
4273  case GUEST_DS_AR_BYTES:
4274  case GUEST_FS_AR_BYTES:
4275  case GUEST_GS_AR_BYTES:
4276  case GUEST_LDTR_AR_BYTES:
4277  case GUEST_TR_AR_BYTES:
4278  case GUEST_ES_BASE:
4279  case GUEST_CS_BASE:
4280  case GUEST_SS_BASE:
4281  case GUEST_DS_BASE:
4282  case GUEST_FS_BASE:
4283  case GUEST_GS_BASE:
4284  case GUEST_LDTR_BASE:
4285  case GUEST_TR_BASE:
4286  case GUEST_GDTR_BASE:
4287  case GUEST_IDTR_BASE:
4288  case GUEST_PENDING_DBG_EXCEPTIONS:
4289  case GUEST_BNDCFGS:
4290  return true;
4291  default:
4292  break;
4293  }
4294 
4295  return false;
4296 }
Here is the caller graph for this function:

◆ load_vmcs12_host_state()

static void load_vmcs12_host_state ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 4509 of file nested.c.

4511 {
4512  enum vm_entry_failure_code ignored;
4513  struct kvm_segment seg;
4514 
4515  if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4516  vcpu->arch.efer = vmcs12->host_ia32_efer;
4517  else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4518  vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4519  else
4520  vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4521  vmx_set_efer(vcpu, vcpu->arch.efer);
4522 
4523  kvm_rsp_write(vcpu, vmcs12->host_rsp);
4524  kvm_rip_write(vcpu, vmcs12->host_rip);
4525  vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4526  vmx_set_interrupt_shadow(vcpu, 0);
4527 
4528  /*
4529  * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4530  * actually changed, because vmx_set_cr0 refers to efer set above.
4531  *
4532  * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4533  * (KVM doesn't change it);
4534  */
4535  vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4536  vmx_set_cr0(vcpu, vmcs12->host_cr0);
4537 
4538  /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4539  vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4540  vmx_set_cr4(vcpu, vmcs12->host_cr4);
4541 
4542  nested_ept_uninit_mmu_context(vcpu);
4543 
4544  /*
4545  * Only PDPTE load can fail as the value of cr3 was checked on entry and
4546  * couldn't have changed.
4547  */
4548  if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
4549  nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4550 
4551  nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4552 
4553  vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4554  vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4555  vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4556  vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4557  vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4558  vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4559  vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4560 
4561  /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4562  if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4563  vmcs_write64(GUEST_BNDCFGS, 0);
4564 
4565  if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4566  vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4567  vcpu->arch.pat = vmcs12->host_ia32_pat;
4568  }
4569  if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4570  kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4571  WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4572  vmcs12->host_ia32_perf_global_ctrl));
4573 
4574  /* Set L1 segment info according to Intel SDM
4575  27.5.2 Loading Host Segment and Descriptor-Table Registers */
4576  seg = (struct kvm_segment) {
4577  .base = 0,
4578  .limit = 0xFFFFFFFF,
4579  .selector = vmcs12->host_cs_selector,
4580  .type = 11,
4581  .present = 1,
4582  .s = 1,
4583  .g = 1
4584  };
4585  if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4586  seg.l = 1;
4587  else
4588  seg.db = 1;
4589  __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4590  seg = (struct kvm_segment) {
4591  .base = 0,
4592  .limit = 0xFFFFFFFF,
4593  .type = 3,
4594  .present = 1,
4595  .s = 1,
4596  .db = 1,
4597  .g = 1
4598  };
4599  seg.selector = vmcs12->host_ds_selector;
4600  __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4601  seg.selector = vmcs12->host_es_selector;
4602  __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4603  seg.selector = vmcs12->host_ss_selector;
4604  __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4605  seg.selector = vmcs12->host_fs_selector;
4606  seg.base = vmcs12->host_fs_base;
4607  __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4608  seg.selector = vmcs12->host_gs_selector;
4609  seg.base = vmcs12->host_gs_base;
4610  __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4611  seg = (struct kvm_segment) {
4612  .base = vmcs12->host_tr_base,
4613  .limit = 0x67,
4614  .selector = vmcs12->host_tr_selector,
4615  .type = 11,
4616  .present = 1
4617  };
4618  __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4619 
4620  memset(&seg, 0, sizeof(seg));
4621  seg.unusable = 1;
4622  __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
4623 
4624  kvm_set_dr(vcpu, 7, 0x400);
4625  vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4626 
4627  if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4628  vmcs12->vm_exit_msr_load_count))
4629  nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4630 
4631  to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
4632 }
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
#define vcpu_to_pmu(vcpu)
Definition: pmu.h:7
static bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
Definition: pmu.h:44
bool emulation_required
Definition: vmx.h:318
u64 vm_exit_msr_load_addr
Definition: vmcs12.h:41
u32 vm_exit_msr_load_count
Definition: vmcs12.h:129
static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool is_vmenter)
Definition: nested.c:1167
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
Definition: nested.c:938
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code)
Definition: nested.c:1115
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
Definition: nested.c:203
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
Definition: nested.c:464
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
Definition: vmx.c:3572
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
Definition: vmx.c:1504
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
Definition: vmx.c:3115
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Definition: vmx.c:3275
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Definition: vmx.c:3432
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
Definition: vmx.c:1574
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
Definition: vmx.c:1527
static unsigned long vmx_l1_guest_owned_cr0_bits(void)
Definition: vmx.h:634
static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
Definition: vmx_ops.h:258
static __always_inline void vmcs_write32(unsigned long field, u32 value)
Definition: vmx_ops.h:237
static __always_inline void vmcs_write64(unsigned long field, u64 value)
Definition: vmx_ops.h:246
static __always_inline unsigned long vmcs_readl(unsigned long field)
Definition: vmx_ops.h:181
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
Definition: x86.c:1373
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
Definition: x86.c:1987
Here is the call graph for this function:
Here is the caller graph for this function:

◆ module_param()

module_param ( nested_early_check  ,
bool  ,
S_IRUGO   
)

◆ module_param_named()

module_param_named ( enable_shadow_vmcs  ,
enable_shadow_vmcs  ,
bool  ,
S_IRUGO   
)

◆ nested_cache_shadow_vmcs12()

static void nested_cache_shadow_vmcs12 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 701 of file nested.c.

703 {
704  struct vcpu_vmx *vmx = to_vmx(vcpu);
705  struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
706 
707  if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
708  vmcs12->vmcs_link_pointer == INVALID_GPA)
709  return;
710 
711  if (ghc->gpa != vmcs12->vmcs_link_pointer &&
712  kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
713  vmcs12->vmcs_link_pointer, VMCS12_SIZE))
714  return;
715 
716  kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
717  VMCS12_SIZE);
718 }
static bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
Definition: nested.h:217
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_check_guest_non_reg_state()

static int nested_check_guest_non_reg_state ( struct vmcs12 vmcs12)
static

Definition at line 3048 of file nested.c.

3049 {
3050  if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
3051  vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
3052  vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
3053  return -EINVAL;
3054 
3055  return 0;
3056 }
#define CC
Definition: nested.c:27
Here is the caller graph for this function:

◆ nested_check_vm_entry_controls()

static int nested_check_vm_entry_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2847 of file nested.c.

2849 {
2850  struct vcpu_vmx *vmx = to_vmx(vcpu);
2851 
2852  if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2853  vmx->nested.msrs.entry_ctls_low,
2854  vmx->nested.msrs.entry_ctls_high)))
2855  return -EINVAL;
2856 
2857  /*
2858  * From the Intel SDM, volume 3:
2859  * Fields relevant to VM-entry event injection must be set properly.
2860  * These fields are the VM-entry interruption-information field, the
2861  * VM-entry exception error code, and the VM-entry instruction length.
2862  */
2863  if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2864  u32 intr_info = vmcs12->vm_entry_intr_info_field;
2865  u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2866  u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2867  bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2868  bool should_have_error_code;
2869  bool urg = nested_cpu_has2(vmcs12,
2870  SECONDARY_EXEC_UNRESTRICTED_GUEST);
2871  bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2872 
2873  /* VM-entry interruption-info field: interruption type */
2874  if (CC(intr_type == INTR_TYPE_RESERVED) ||
2875  CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2876  !nested_cpu_supports_monitor_trap_flag(vcpu)))
2877  return -EINVAL;
2878 
2879  /* VM-entry interruption-info field: vector */
2880  if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2881  CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2882  CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2883  return -EINVAL;
2884 
2885  /* VM-entry interruption-info field: deliver error code */
2886  should_have_error_code =
2887  intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2888  x86_exception_has_error_code(vector);
2889  if (CC(has_error_code != should_have_error_code))
2890  return -EINVAL;
2891 
2892  /* VM-entry exception error code */
2893  if (CC(has_error_code &&
2894  vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2895  return -EINVAL;
2896 
2897  /* VM-entry interruption-info field: reserved bits */
2898  if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2899  return -EINVAL;
2900 
2901  /* VM-entry instruction length */
2902  switch (intr_type) {
2903  case INTR_TYPE_SOFT_EXCEPTION:
2904  case INTR_TYPE_SOFT_INTR:
2905  case INTR_TYPE_PRIV_SW_EXCEPTION:
2906  if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2907  CC(vmcs12->vm_entry_instruction_len == 0 &&
2908  CC(!nested_cpu_has_zero_length_injection(vcpu))))
2909  return -EINVAL;
2910  }
2911  }
2912 
2913  if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2914  return -EINVAL;
2915 
2916  return 0;
2917 }
static bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
Definition: nested.h:120
static bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
Definition: nested.h:115
static bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
Definition: nested.h:137
static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:831
static bool vmx_control_verify(u32 control, u32 low, u32 high)
Definition: nested.c:210
static bool x86_exception_has_error_code(unsigned int vector)
Definition: x86.h:174
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_check_vm_execution_controls()

static int nested_check_vm_execution_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2771 of file nested.c.

2773 {
2774  struct vcpu_vmx *vmx = to_vmx(vcpu);
2775 
2776  if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2777  vmx->nested.msrs.pinbased_ctls_low,
2778  vmx->nested.msrs.pinbased_ctls_high)) ||
2779  CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2780  vmx->nested.msrs.procbased_ctls_low,
2781  vmx->nested.msrs.procbased_ctls_high)))
2782  return -EINVAL;
2783 
2784  if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2785  CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2786  vmx->nested.msrs.secondary_ctls_low,
2787  vmx->nested.msrs.secondary_ctls_high)))
2788  return -EINVAL;
2789 
2790  if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2791  nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2792  nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2793  nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2794  nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2795  nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2796  nested_vmx_check_nmi_controls(vmcs12) ||
2797  nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2798  nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2799  nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2800  nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2801  CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2802  return -EINVAL;
2803 
2804  if (!nested_cpu_has_preemption_timer(vmcs12) &&
2805  nested_cpu_has_save_preemption_timer(vmcs12))
2806  return -EINVAL;
2807 
2808  if (nested_cpu_has_ept(vmcs12) &&
2809  CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2810  return -EINVAL;
2811 
2812  if (nested_cpu_has_vmfunc(vmcs12)) {
2813  if (CC(vmcs12->vm_function_control &
2814  ~vmx->nested.msrs.vmfunc_controls))
2815  return -EINVAL;
2816 
2817  if (nested_cpu_has_eptp_switching(vmcs12)) {
2818  if (CC(!nested_cpu_has_ept(vmcs12)) ||
2819  CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2820  return -EINVAL;
2821  }
2822  }
2823 
2824  return 0;
2825 }
static bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
Definition: nested.h:144
static bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
Definition: nested.h:222
static unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
Definition: nested.h:99
static bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
Definition: nested.h:185
static bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
Definition: nested.h:210
static bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
Definition: nested.h:132
static int nested_cpu_has_ept(struct vmcs12 *vmcs12)
Definition: nested.h:165
u64 eptp_list_address
Definition: vmcs12.h:70
u32 cr3_target_count
Definition: vmcs12.h:126
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:842
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:759
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:749
static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:873
static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:864
static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:855
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:513
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
Definition: nested.c:2710
static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:525
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:500
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_check_vm_exit_controls()

static int nested_check_vm_exit_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2830 of file nested.c.

2832 {
2833  struct vcpu_vmx *vmx = to_vmx(vcpu);
2834 
2835  if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2836  vmx->nested.msrs.exit_ctls_low,
2837  vmx->nested.msrs.exit_ctls_high)) ||
2838  CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2839  return -EINVAL;
2840 
2841  return 0;
2842 }
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:817
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_ept_init_mmu_context()

static void nested_ept_init_mmu_context ( struct kvm_vcpu *  vcpu)
static

Definition at line 451 of file nested.c.

452 {
453  WARN_ON(mmu_is_nested(vcpu));
454 
455  vcpu->arch.mmu = &vcpu->arch.guest_mmu;
456  nested_ept_new_eptp(vcpu);
457  vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
458  vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
459  vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
460 
461  vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
462 }
static u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
static unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
Definition: nested.h:71
static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
Definition: nested.c:440
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
Definition: nested.c:407
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
Definition: x86.h:183
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_ept_inject_page_fault()

static void nested_ept_inject_page_fault ( struct kvm_vcpu *  vcpu,
struct x86_exception fault 
)
static

Definition at line 407 of file nested.c.

409 {
410  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
411  struct vcpu_vmx *vmx = to_vmx(vcpu);
412  u32 vm_exit_reason;
413  unsigned long exit_qualification = vcpu->arch.exit_qualification;
414 
415  if (vmx->nested.pml_full) {
416  vm_exit_reason = EXIT_REASON_PML_FULL;
417  vmx->nested.pml_full = false;
418  exit_qualification &= INTR_INFO_UNBLOCK_NMI;
419  } else {
420  if (fault->error_code & PFERR_RSVD_MASK)
421  vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
422  else
423  vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
424 
425  /*
426  * Although the caller (kvm_inject_emulated_page_fault) would
427  * have already synced the faulting address in the shadow EPT
428  * tables for the current EPTP12, we also need to sync it for
429  * any other cached EPTP02s based on the same EP4TA, since the
430  * TLB associates mappings to the EP4TA rather than the full EPTP.
431  */
432  nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
433  fault->address);
434  }
435 
436  nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
437  vmcs12->guest_physical_address = fault->address;
438 }
bool pml_full
Definition: vmx.h:120
unsigned long exit_qualification
Definition: vmx.h:265
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, gpa_t addr)
Definition: nested.c:387
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_ept_invalidate_addr()

static void nested_ept_invalidate_addr ( struct kvm_vcpu *  vcpu,
gpa_t  eptp,
gpa_t  addr 
)
static

Definition at line 387 of file nested.c.

389 {
390  unsigned long roots = 0;
391  uint i;
392  struct kvm_mmu_root_info *cached_root;
393 
394  WARN_ON_ONCE(!mmu_is_nested(vcpu));
395 
396  for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
397  cached_root = &vcpu->arch.mmu->prev_roots[i];
398 
399  if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
400  eptp))
401  roots |= KVM_MMU_ROOT_PREVIOUS(i);
402  }
403  if (roots)
404  kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots);
405 }
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots)
Definition: mmu.c:5939
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_ept_new_eptp()

static void nested_ept_new_eptp ( struct kvm_vcpu *  vcpu)
static

Definition at line 440 of file nested.c.

441 {
442  struct vcpu_vmx *vmx = to_vmx(vcpu);
443  bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
444  int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
445 
446  kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
447  nested_ept_ad_enabled(vcpu),
448  nested_ept_get_eptp(vcpu));
449 }
static int ept_caps_to_lpage_level(u32 ept_caps)
Definition: capabilities.h:324
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp)
Definition: mmu.c:5458
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
Definition: nested.h:77
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_ept_root_matches()

static bool nested_ept_root_matches ( hpa_t  root_hpa,
u64  root_eptp,
u64  eptp 
)
static

Definition at line 381 of file nested.c.

382 {
383  return VALID_PAGE(root_hpa) &&
384  ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
385 }
#define EPTP_PA_MASK
Definition: nested.c:379
Here is the caller graph for this function:
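Two EPTPs are considered to reference the same root when their page-frame bits agree; EPTP_PA_MASK (GENMASK_ULL(51, 12)) strips the memory-type, page-walk-length and accessed/dirty-enable bits in the low 12 bits so only the 4-KiB-aligned EP4TA is compared. A tiny illustration with hypothetical EPTP values:

#include <stdint.h>
#include <assert.h>

#define EPTP_PA_MASK	(((1ull << 52) - 1) & ~0xfffull)   /* bits 51:12 */

static int eptp_same_root(uint64_t a, uint64_t b)
{
	return (a & EPTP_PA_MASK) == (b & EPTP_PA_MASK);
}

int main(void)
{
	/* Hypothetical EPTPs: same EPT PML4 page, different low attribute
	 * bits (WB memory type + walk length vs. A/D bits also enabled). */
	uint64_t eptp_a = 0x123456000ull | 0x1e;
	uint64_t eptp_b = 0x123456000ull | 0x5e;

	assert(eptp_same_root(eptp_a, eptp_b));
	assert(!eptp_same_root(eptp_a, eptp_a + 0x1000));
	return 0;
}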

◆ nested_ept_uninit_mmu_context()

static void nested_ept_uninit_mmu_context ( struct kvm_vcpu *  vcpu)
static

Definition at line 464 of file nested.c.

465 {
466  vcpu->arch.mmu = &vcpu->arch.root_mmu;
467  vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
468 }
Here is the caller graph for this function:

◆ nested_evmcs_handle_vmclear()

static bool nested_evmcs_handle_vmclear ( struct kvm_vcpu *  vcpu,
gpa_t  vmptr 
)
static

Definition at line 248 of file nested.c.

249 {
250 #ifdef CONFIG_KVM_HYPERV
251  struct vcpu_vmx *vmx = to_vmx(vcpu);
252  /*
253  * When Enlightened VMEntry is enabled on the calling CPU we treat
254  * memory area pointer by vmptr as Enlightened VMCS (as there's no good
255  * way to distinguish it from VMCS12) and we must not corrupt it by
256  * writing to the non-existent 'launch_state' field. The area doesn't
257  * have to be the currently active EVMCS on the calling CPU and there's
258  * nothing KVM has to do to transition it from 'active' to 'non-active'
259  * state. It is possible that the area will stay mapped as
260  * vmx->nested.hv_evmcs but this shouldn't be a problem.
261  */
262  if (!guest_cpuid_has_evmcs(vcpu) ||
263  !evmptr_is_valid(nested_get_evmptr(vcpu)))
264  return false;
265 
266  if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
267  nested_release_evmcs(vcpu);
268 
269  return true;
270 #else
271  return false;
272 #endif
273 }
u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
Definition: hyperv.c:16
static bool evmptr_is_valid(u64 evmptr)
Definition: hyperv.h:64
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_exit_intr_ack_set()

static bool nested_exit_intr_ack_set ( struct kvm_vcpu *  vcpu)
static

Definition at line 743 of file nested.c.

744 {
745  return get_vmcs12(vcpu)->vm_exit_controls &
746  VM_EXIT_ACK_INTR_ON_EXIT;
747 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_flush_cached_shadow_vmcs12()

static void nested_flush_cached_shadow_vmcs12 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 720 of file nested.c.

722 {
723  struct vcpu_vmx *vmx = to_vmx(vcpu);
724  struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
725 
727  vmcs12->vmcs_link_pointer == INVALID_GPA)
728  return;
729 
730  if (ghc->gpa != vmcs12->vmcs_link_pointer &&
731  kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
733  return;
734 
735  kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
736  VMCS12_SIZE);
737 }
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
Definition: kvm_main.c:3571
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_get_vmcs12_pages()

static bool nested_get_vmcs12_pages ( struct kvm_vcpu *  vcpu)
static

Definition at line 3232 of file nested.c.

3233 {
3234  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3235  struct vcpu_vmx *vmx = to_vmx(vcpu);
3236  struct kvm_host_map *map;
3237 
3238  if (!vcpu->arch.pdptrs_from_userspace &&
3239  !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3240  /*
3241  * Reload the guest's PDPTRs since after a migration
3242  * the guest CR3 might be restored prior to setting the nested
3243  * state which can lead to a load of wrong PDPTRs.
3244  */
3245  if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
3246  return false;
3247  }
3248 
3249 
3250  if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3251  map = &vmx->nested.apic_access_page_map;
3252 
3253  if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) {
3254  vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));
3255  } else {
3256  pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n",
3257  __func__);
3258  vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3259  vcpu->run->internal.suberror =
3260  KVM_INTERNAL_ERROR_EMULATION;
3261  vcpu->run->internal.ndata = 0;
3262  return false;
3263  }
3264  }
3265 
3266  if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3267  map = &vmx->nested.virtual_apic_map;
3268 
3269  if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3270  vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3271  } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3272  nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3273  !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3274  /*
3275  * The processor will never use the TPR shadow, simply
3276  * clear the bit from the execution control. Such a
3277  * configuration is useless, but it happens in tests.
3278  * For any other configuration, failing the vm entry is
3279  * _not_ what the processor does but it's basically the
3280  * only possibility we have.
3281  */
3282  exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3283  } else {
3284  /*
3285  * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3286  * force VM-Entry to fail.
3287  */
3288  vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
3289  }
3290  }
3291 
3292  if (nested_cpu_has_posted_intr(vmcs12)) {
3293  map = &vmx->nested.pi_desc_map;
3294 
3295  if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3296  vmx->nested.pi_desc =
3297  (struct pi_desc *)(((void *)map->hva) +
3298  offset_in_page(vmcs12->posted_intr_desc_addr));
3299  vmcs_write64(POSTED_INTR_DESC_ADDR,
3300  pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3301  } else {
3302  /*
3303  * Defer the KVM_INTERNAL_EXIT until KVM tries to
3304  * access the contents of the VMCS12 posted interrupt
3305  * descriptor. (Note that KVM may do this when it
3306  * should not, per the architectural specification.)
3307  */
3308  vmx->nested.pi_desc = NULL;
3309  pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3310  }
3311  }
3312  if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3313  exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3314  else
3315  exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3316 
3317  return true;
3318 }
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
Definition: kvm_main.c:3152
static bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
Definition: nested.h:200
u64 posted_intr_desc_addr
Definition: vmcs12.h:46
u64 apic_access_addr
Definition: vmcs12.h:45
static bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:597
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
Definition: x86.c:866
static bool is_pae_paging(struct kvm_vcpu *vcpu)
Definition: x86.h:203
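Note how the posted-interrupt descriptor handling above maps the containing guest page and then re-applies the descriptor's offset within that page, both to the cached host virtual address and to the host physical address written to POSTED_INTR_DESC_ADDR. A small user-space sketch of that offset arithmetic, assuming a 4 KiB page size and made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ull
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static uint64_t offset_in_page(uint64_t addr) { return addr & ~PAGE_MASK; }

    int main(void)
    {
            uint64_t desc_gpa = 0x0000000012345a40ull;  /* vmcs12->posted_intr_desc_addr, made up */
            uint64_t page_hpa = 0x0000000087654000ull;  /* pfn_to_hpa(map->pfn), hypothetical     */

            /* the descriptor keeps the same offset inside the page in GPA and HPA space */
            printf("POSTED_INTR_DESC_ADDR = %#llx\n",
                   (unsigned long long)(page_hpa + offset_in_page(desc_gpa)));
            return 0;
    }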
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_has_guest_tlb_tag()

static bool nested_has_guest_tlb_tag ( struct kvm_vcpu *  vcpu)
static

Definition at line 1159 of file nested.c.

1160 {
1161  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1162 
1163  return enable_ept ||
1164  (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1165 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_mark_vmcs12_pages_dirty()

void nested_mark_vmcs12_pages_dirty ( struct kvm_vcpu *  vcpu)

Definition at line 3838 of file nested.c.

3839 {
3840  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3841  gfn_t gfn;
3842 
3843  /*
3844  * Don't need to mark the APIC access page dirty; it is never
3845  * written to by the CPU during APIC virtualization.
3846  */
3847 
3848  if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3849  gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3850  kvm_vcpu_mark_page_dirty(vcpu, gfn);
3851  }
3852 
3853  if (nested_cpu_has_posted_intr(vmcs12)) {
3854  gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3855  kvm_vcpu_mark_page_dirty(vcpu, gfn);
3856  }
3857 }
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
Definition: kvm_main.c:3669
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_msr_store_list_has_msr()

static bool nested_msr_store_list_has_msr ( struct kvm_vcpu *  vcpu,
u32  msr_index 
)
static

Definition at line 1055 of file nested.c.

1056 {
1057  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1058  u32 count = vmcs12->vm_exit_msr_store_count;
1059  u64 gpa = vmcs12->vm_exit_msr_store_addr;
1060  struct vmx_msr_entry e;
1061  u32 i;
1062 
1063  for (i = 0; i < count; i++) {
1064  if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1065  return false;
1066 
1067  if (e.index == msr_index)
1068  return true;
1069  }
1070  return false;
1071 }
u32 vm_exit_msr_store_count
Definition: vmcs12.h:128
u64 vm_exit_msr_store_addr
Definition: vmcs12.h:40
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, struct vmx_msr_entry *e)
Definition: nested.c:1005
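Each element of the VM-exit MSR-store area is a 16-byte vmx_msr_entry (32-bit MSR index, 32 reserved bits, 64-bit value), so entry i sits at vm_exit_msr_store_addr + i * 16 and is fetched from guest memory by read_and_check_msr_entry(). A small sketch of the layout and address arithmetic (the store address is made up):

    #include <stdint.h>
    #include <stdio.h>

    struct vmx_msr_entry {
            uint32_t index;
            uint32_t reserved;
            uint64_t value;
    };

    int main(void)
    {
            uint64_t store_addr = 0x44cc0000ull;   /* vmcs12->vm_exit_msr_store_addr, made up */
            uint32_t i = 3;

            /* guest-physical address of the i-th entry */
            printf("entry %u at %#llx (entry size %zu)\n", i,
                   (unsigned long long)(store_addr + i * sizeof(struct vmx_msr_entry)),
                   sizeof(struct vmx_msr_entry));
            return 0;
    }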
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_release_evmcs()

static void nested_release_evmcs ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 227 of file nested.c.

228 {
229 #ifdef CONFIG_KVM_HYPERV
230  struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
231  struct vcpu_vmx *vmx = to_vmx(vcpu);
232 
233  if (nested_vmx_is_evmptr12_valid(vmx)) {
234  kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
235  vmx->nested.hv_evmcs = NULL;
236  }
237 
238  vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
239 
240  if (hv_vcpu) {
241  hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
242  hv_vcpu->nested.vm_id = 0;
243  hv_vcpu->nested.vp_id = 0;
244  }
245 #endif
246 }
#define EVMPTR_INVALID
Definition: hyperv.h:9
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_release_vmcs12()

static void nested_release_vmcs12 ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 5281 of file nested.c.

5282 {
5283  struct vcpu_vmx *vmx = to_vmx(vcpu);
5284 
5285  if (vmx->nested.current_vmptr == INVALID_GPA)
5286  return;
5287 
5289 
5290  if (enable_shadow_vmcs) {
5291  /* copy to memory all shadowed fields in case
5292  they were modified */
5293  copy_shadow_to_vmcs12(vmx);
5295  }
5296  vmx->nested.posted_intr_nv = -1;
5297 
5298  /* Flush VMCS12 to guest memory */
5300  vmx->nested.current_vmptr >> PAGE_SHIFT,
5301  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5302 
5303  kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5304 
5305  vmx->nested.current_vmptr = INVALID_GPA;
5306 }
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len)
Definition: kvm_main.c:3440
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
Definition: nested.c:1543
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_sync_vmcs12_to_shadow()

void nested_sync_vmcs12_to_shadow ( struct kvm_vcpu *  vcpu)

Definition at line 2124 of file nested.c.

2125 {
2126  struct vcpu_vmx *vmx = to_vmx(vcpu);
2127 
2130  else
2131  copy_vmcs12_to_shadow(vmx);
2132 
2133  vmx->nested.need_vmcs12_to_shadow_sync = false;
2134 }
bool need_vmcs12_to_shadow_sync
Definition: vmx.h:151
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
Definition: nested.c:1570
static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
Definition: nested.c:1852
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_abort()

static void nested_vmx_abort ( struct kvm_vcpu *  vcpu,
u32  indicator 
)
static

Definition at line 203 of file nested.c.

204 {
205  /* TODO: don't simply reset the guest here. */
206  kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
207  pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
208 }
Here is the caller graph for this function:

◆ nested_vmx_calc_efer()

static u64 nested_vmx_calc_efer ( struct vcpu_vmx vmx,
struct vmcs12 vmcs12 
)
static

Definition at line 2189 of file nested.c.

2190 {
2191  if (vmx->nested.nested_run_pending &&
2192  (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2193  return vmcs12->guest_ia32_efer;
2194  else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2195  return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2196  else
2197  return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2198 }
bool nested_run_pending
Definition: vmx.h:189
Here is the caller graph for this function:

◆ nested_vmx_calc_vmcs_enum_msr()

static u64 nested_vmx_calc_vmcs_enum_msr ( void  )
static

Definition at line 6778 of file nested.c.

6779 {
6780  /*
6781  * Note these are the so-called "index" of the VMCS field encoding, not
6782  * the index into vmcs12.
6783  */
6784  unsigned int max_idx, idx;
6785  int i;
6786 
6787  /*
6788  * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
6789  * vmcs12, regardless of whether or not the associated feature is
6790  * exposed to L1. Simply find the field with the highest index.
6791  */
6792  max_idx = 0;
6793  for (i = 0; i < nr_vmcs12_fields; i++) {
6794  /* The vmcs12 table is very, very sparsely populated. */
6795  if (!vmcs12_field_offsets[i])
6796  continue;
6797 
6799  if (idx > max_idx)
6800  max_idx = idx;
6801  }
6802 
6803  return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
6804 }
const unsigned int nr_vmcs12_fields
Definition: vmcs12.c:155
const unsigned short vmcs12_field_offsets[]
Definition: vmcs12.c:12
#define VMCS_FIELD_INDEX_SHIFT
Definition: vmcs.h:185
static unsigned int vmcs_field_index(unsigned long field)
Definition: vmcs.h:188
#define VMCS12_IDX_TO_ENC(idx)
Definition: nested.c:6776
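Two conventions are at work here, both assumed in the sketch below: a VMCS field encoding keeps its "index" in bits 9:1 (hence the shift by VMCS_FIELD_INDEX_SHIFT), and vmcs12_field_offsets[] is indexed by the encoding rotated left by 6 bits, which VMCS12_IDX_TO_ENC() undoes with a 16-bit rotate right by 6. A user-space round-trip sketch with an arbitrary encoding value:

    #include <stdint.h>
    #include <stdio.h>

    #define ROL16(x, n)     ((uint16_t)(((x) << (n)) | ((x) >> (16 - (n)))))
    #define IDX_TO_ENC(idx) ((uint16_t)(((uint16_t)(idx) >> 6) | ((uint16_t)(idx) << 10)))

    int main(void)
    {
            uint16_t enc = 0x2802;           /* arbitrary field encoding          */
            uint16_t idx = ROL16(enc, 6);    /* table index used by the offsets[] */

            printf("round trip ok : %d\n", IDX_TO_ENC(idx) == enc);
            printf("field 'index' : %u\n", (enc >> 1) & 0x1ff);   /* bits 9:1 */
            return 0;
    }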
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_address_space_size()

static int nested_vmx_check_address_space_size ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2935 of file nested.c.

2937 {
2938 #ifdef CONFIG_X86_64
2939  if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2940  !!(vcpu->arch.efer & EFER_LMA)))
2941  return -EINVAL;
2942 #endif
2943  return 0;
2944 }
Here is the caller graph for this function:

◆ nested_vmx_check_apic_access_controls()

static int nested_vmx_check_apic_access_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 749 of file nested.c.

751 {
752  if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
754  return -EINVAL;
755  else
756  return 0;
757 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_apicv_controls()

static int nested_vmx_check_apicv_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 759 of file nested.c.

761 {
766  return 0;
767 
768  /*
769  * If virtualize x2apic mode is enabled,
770  * virtualize apic access must be disabled.
771  */
773  nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
774  return -EINVAL;
775 
776  /*
777  * If virtual interrupt delivery is enabled,
778  * we must exit on external interrupts.
779  */
781  return -EINVAL;
782 
783  /*
784  * bits 15:8 should be zero in posted_intr_nv,
785  * the descriptor address has been already checked
786  * in nested_get_vmcs12_pages.
787  *
788  * bits 5:0 of posted_intr_desc_addr should be zero.
789  */
792  CC(!nested_exit_intr_ack_set(vcpu)) ||
793  CC((vmcs12->posted_intr_nv & 0xff00)) ||
795  return -EINVAL;
796 
797  /* tpr shadow is needed by all apicv features. */
798  if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
799  return -EINVAL;
800 
801  return 0;
802 }
static bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment)
Definition: cpuid.h:50
static bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
Definition: nested.h:180
static bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
Definition: nested.h:195
static bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
Definition: nested.h:190
u16 posted_intr_nv
Definition: vmcs12.h:170
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
Definition: nested.c:743
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_controls()

static int nested_vmx_check_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2919 of file nested.c.

2921 {
2925  return -EINVAL;
2926 
2927 #ifdef CONFIG_KVM_HYPERV
2928  if (guest_cpuid_has_evmcs(vcpu))
2930 #endif
2931 
2932  return 0;
2933 }
int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
Definition: hyperv.c:161
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2830
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2847
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2771
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_entry_msr_switch_controls()

static int nested_vmx_check_entry_msr_switch_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 831 of file nested.c.

833 {
837  return -EINVAL;
838 
839  return 0;
840 }
u32 vm_entry_msr_load_count
Definition: vmcs12.h:131
u64 vm_entry_msr_load_addr
Definition: vmcs12.h:42
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, u32 count, u64 addr)
Definition: nested.c:804
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_eptp()

static bool nested_vmx_check_eptp ( struct kvm_vcpu *  vcpu,
u64  new_eptp 
)
static

Definition at line 2723 of file nested.c.

2724 {
2725  struct vcpu_vmx *vmx = to_vmx(vcpu);
2726 
2727  /* Check for memory type validity */
2728  switch (new_eptp & VMX_EPTP_MT_MASK) {
2729  case VMX_EPTP_MT_UC:
2730  if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2731  return false;
2732  break;
2733  case VMX_EPTP_MT_WB:
2734  if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2735  return false;
2736  break;
2737  default:
2738  return false;
2739  }
2740 
2741  /* Page-walk levels validity. */
2742  switch (new_eptp & VMX_EPTP_PWL_MASK) {
2743  case VMX_EPTP_PWL_5:
2744  if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2745  return false;
2746  break;
2747  case VMX_EPTP_PWL_4:
2748  if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2749  return false;
2750  break;
2751  default:
2752  return false;
2753  }
2754 
2755  /* Reserved bits should not be set */
2756  if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
2757  return false;
2758 
2759  /* AD, if set, should be supported */
2760  if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2761  if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2762  return false;
2763  }
2764 
2765  return true;
2766 }
static bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
Definition: cpuid.h:45
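The checks follow the EPTP layout: bits 2:0 select the memory type (0 = UC, 6 = WB), bits 5:3 hold the page-walk length minus one (3 for a 4-level walk, 4 for a 5-level walk), bit 6 enables accessed/dirty flags, bits 11:7 must be clear (treated as reserved by the '(new_eptp >> 7) & 0x1f' test), and bits 51:12 give the root table address. A hedged sketch that composes a plausible EPTP from a made-up root:

    #include <stdint.h>
    #include <stdio.h>

    #define EPTP_MT_WB        6ull          /* write-back memory type      */
    #define EPTP_PWL_4        (3ull << 3)   /* 4-level walk: (4 - 1) << 3  */
    #define EPTP_AD_ENABLE    (1ull << 6)   /* accessed/dirty flags        */

    int main(void)
    {
            uint64_t root = 0x0000000045678000ull;   /* 4 KiB-aligned root table, made up */
            uint64_t eptp = root | EPTP_MT_WB | EPTP_PWL_4 | EPTP_AD_ENABLE;

            printf("eptp = %#llx, reserved bits 11:7 = %#llx\n",
                   (unsigned long long)eptp,
                   (unsigned long long)((eptp >> 7) & 0x1f));
            return 0;
    }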
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_exit_msr_switch_controls()

static int nested_vmx_check_exit_msr_switch_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 817 of file nested.c.

819 {
826  return -EINVAL;
827 
828  return 0;
829 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_guest_state()

static int nested_vmx_check_guest_state ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
enum vm_entry_failure_code *  entry_failure_code 
)
static

Definition at line 3058 of file nested.c.

3061 {
3062  bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
3063 
3064  *entry_failure_code = ENTRY_FAIL_DEFAULT;
3065 
3066  if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3068  return -EINVAL;
3069 
3070  if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3072  return -EINVAL;
3073 
3074  if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3076  return -EINVAL;
3077 
3079  *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3080  return -EINVAL;
3081  }
3082 
3083  if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3086  return -EINVAL;
3087 
3088  if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3089  return -EINVAL;
3090 
3091  if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3092  CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3093  return -EINVAL;
3094 
3095  /*
3096  * If the load IA32_EFER VM-entry control is 1, the following checks
3097  * are performed on the field for the IA32_EFER MSR:
3098  * - Bits reserved in the IA32_EFER MSR must be 0.
3099  * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3100  * the IA-32e mode guest VM-exit control. It must also be identical
3101  * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
3102  * CR0.PG) is 1.
3103  */
3104  if (to_vmx(vcpu)->nested.nested_run_pending &&
3105  (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3106  if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3107  CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3108  CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3109  ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3110  return -EINVAL;
3111  }
3112 
3113  if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3114  (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3115  CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3116  return -EINVAL;
3117 
3119  return -EINVAL;
3120 
3121  return 0;
3122 }
#define nested_guest_cr4_valid
Definition: nested.h:289
static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
Definition: nested.h:257
static bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
Definition: pmu.h:90
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
Definition: nested.c:3048
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:3015
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
Definition: x86.c:1744
static bool kvm_pat_valid(u64 data)
Definition: x86.h:456
static bool kvm_dr7_valid(u64 data)
Definition: x86.h:464
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_host_state()

static int nested_vmx_check_host_state ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 2946 of file nested.c.

2948 {
2949  bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2950 
2951  if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2954  return -EINVAL;
2955 
2958  return -EINVAL;
2959 
2960  if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2962  return -EINVAL;
2963 
2964  if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2967  return -EINVAL;
2968 
2969  if (ia32e) {
2970  if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2971  return -EINVAL;
2972  } else {
2973  if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2974  CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2975  CC((vmcs12->host_rip) >> 32))
2976  return -EINVAL;
2977  }
2978 
2979  if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2980  CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2981  CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2982  CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2983  CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2984  CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2985  CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2986  CC(vmcs12->host_cs_selector == 0) ||
2987  CC(vmcs12->host_tr_selector == 0) ||
2988  CC(vmcs12->host_ss_selector == 0 && !ia32e))
2989  return -EINVAL;
2990 
2997  return -EINVAL;
2998 
2999  /*
3000  * If the load IA32_EFER VM-exit control is 1, bits reserved in the
3001  * IA32_EFER MSR must be 0 in the field for that register. In addition,
3002  * the values of the LMA and LME bits in the field must each be that of
3003  * the host address-space size VM-exit control.
3004  */
3005  if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
3006  if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
3007  CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
3008  CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
3009  return -EINVAL;
3010  }
3011 
3012  return 0;
3013 }
static bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
Definition: cpuid.h:287
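The selector checks rely on the segment-selector layout: bits 1:0 are the RPL and bit 2 the table indicator, so anding with SEGMENT_RPL_MASK | SEGMENT_TI_MASK (0x7) requires every host selector to be a GDT selector with RPL 0, and CS, TR (and SS outside 64-bit mode) must additionally be non-null. A small sketch of one such check (not KVM code):

    #include <stdint.h>
    #include <stdio.h>

    #define SEGMENT_RPL_MASK  0x3   /* requested privilege level, bits 1:0 */
    #define SEGMENT_TI_MASK   0x4   /* table indicator (LDT), bit 2        */

    static int host_selector_ok(uint16_t sel, int may_be_null)
    {
            if (sel & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK))
                    return 0;                 /* must be a GDT selector with RPL 0 */
            return may_be_null || sel != 0;   /* CS/TR must be non-null            */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   host_selector_ok(0x10, 1),   /* GDT index 2, RPL 0 -> ok  */
                   host_selector_ok(0x13, 1),   /* RPL 3              -> bad */
                   host_selector_ok(0x00, 0));  /* null CS/TR         -> bad */
            return 0;
    }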
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_io_bitmap_controls()

static int nested_vmx_check_io_bitmap_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 500 of file nested.c.

502 {
503  if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
504  return 0;
505 
506  if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
508  return -EINVAL;
509 
510  return 0;
511 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_io_bitmaps()

bool nested_vmx_check_io_bitmaps ( struct kvm_vcpu *  vcpu,
unsigned int  port,
int  size 
)

Definition at line 5963 of file nested.c.

5965 {
5966  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5967  gpa_t bitmap, last_bitmap;
5968  u8 b;
5969 
5970  last_bitmap = INVALID_GPA;
5971  b = -1;
5972 
5973  while (size > 0) {
5974  if (port < 0x8000)
5975  bitmap = vmcs12->io_bitmap_a;
5976  else if (port < 0x10000)
5977  bitmap = vmcs12->io_bitmap_b;
5978  else
5979  return true;
5980  bitmap += (port & 0x7fff) / 8;
5981 
5982  if (last_bitmap != bitmap)
5983  if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5984  return true;
5985  if (b & (1 << (port & 7)))
5986  return true;
5987 
5988  port++;
5989  size--;
5990  last_bitmap = bitmap;
5991  }
5992 
5993  return false;
5994 }
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
Definition: kvm_main.c:3366
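The two 4 KiB I/O bitmaps cover one bit per port: bitmap A handles ports 0x0000-0x7FFF and bitmap B the rest, so the byte to test lives at bitmap + (port & 0x7fff) / 8 and the bit is port & 7, exactly as the loop above computes. A small sketch of the lookup with made-up bitmap addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t io_bitmap_a = 0x11000000ull;  /* vmcs12->io_bitmap_a, made up */
            uint64_t io_bitmap_b = 0x12000000ull;  /* vmcs12->io_bitmap_b, made up */
            unsigned int port = 0xcf8;             /* PCI config address port      */

            uint64_t bitmap   = (port < 0x8000) ? io_bitmap_a : io_bitmap_b;
            uint64_t byte_gpa = bitmap + (port & 0x7fff) / 8;
            unsigned int bit  = port & 7;

            printf("read byte at %#llx, test bit %u\n",
                   (unsigned long long)byte_gpa, bit);
            return 0;
    }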
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_mode_based_ept_exec_controls()

static int nested_vmx_check_mode_based_ept_exec_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 864 of file nested.c.

866 {
867  if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
869  return -EINVAL;
870  return 0;
871 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_msr_bitmap_controls()

static int nested_vmx_check_msr_bitmap_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 513 of file nested.c.

515 {
516  if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
517  return 0;
518 
519  if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
520  return -EINVAL;
521 
522  return 0;
523 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_msr_switch()

static int nested_vmx_check_msr_switch ( struct kvm_vcpu *  vcpu,
u32  count,
u64  addr 
)
static

Definition at line 804 of file nested.c.

806 {
807  if (count == 0)
808  return 0;
809 
810  if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
811  !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
812  return -EINVAL;
813 
814  return 0;
815 }
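Because each vmx_msr_entry is 16 bytes, the MSR load/store area must be 16-byte aligned and its last byte, addr + count * 16 - 1, must still be a legal guest-physical address; a separate per-entry count limit is applied when the list is actually processed. A small sketch of the bound computation (values made up):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_ENTRY_SIZE 16u   /* sizeof(struct vmx_msr_entry) */

    int main(void)
    {
            uint64_t addr  = 0x00aa1000ull;   /* vm_exit_msr_load_addr, made up */
            uint32_t count = 32;

            uint64_t last_byte = addr + (uint64_t)count * MSR_ENTRY_SIZE - 1;

            printf("aligned: %d, last byte: %#llx\n",
                   (addr & (MSR_ENTRY_SIZE - 1)) == 0,
                   (unsigned long long)last_byte);
            return 0;
    }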
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_nmi_controls()

static int nested_vmx_check_nmi_controls ( struct vmcs12 vmcs12)
static

Definition at line 2710 of file nested.c.

2711 {
2714  return -EINVAL;
2715 
2717  nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2718  return -EINVAL;
2719 
2720  return 0;
2721 }
static bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
Definition: nested.h:150
static bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
Definition: nested.h:155
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_permission()

static int nested_vmx_check_permission ( struct kvm_vcpu *  vcpu)
static

Definition at line 3391 of file nested.c.

3392 {
3393  if (!to_vmx(vcpu)->nested.vmxon) {
3394  kvm_queue_exception(vcpu, UD_VECTOR);
3395  return 0;
3396  }
3397 
3398  if (vmx_get_cpl(vcpu)) {
3399  kvm_inject_gp(vcpu, 0);
3400  return 0;
3401  }
3402 
3403  return 1;
3404 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_pml_controls()

static int nested_vmx_check_pml_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 842 of file nested.c.

844 {
846  return 0;
847 
848  if (CC(!nested_cpu_has_ept(vmcs12)) ||
850  return -EINVAL;
851 
852  return 0;
853 }
static bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
Definition: nested.h:175
u64 pml_address
Definition: vmcs12.h:71
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_shadow_vmcs_controls()

static int nested_vmx_check_shadow_vmcs_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 873 of file nested.c.

875 {
877  return 0;
878 
879  if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
881  return -EINVAL;
882 
883  return 0;
884 }
u64 vmread_bitmap
Definition: vmcs12.h:67
u64 vmwrite_bitmap
Definition: vmcs12.h:68
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_tpr_shadow_controls()

static int nested_vmx_check_tpr_shadow_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 525 of file nested.c.

527 {
528  if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
529  return 0;
530 
532  return -EINVAL;
533 
534  return 0;
535 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_unrestricted_guest_controls()

static int nested_vmx_check_unrestricted_guest_controls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 855 of file nested.c.

857 {
858  if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
860  return -EINVAL;
861  return 0;
862 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_vmcs_link_ptr()

static int nested_vmx_check_vmcs_link_ptr ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 3015 of file nested.c.

3017 {
3018  struct vcpu_vmx *vmx = to_vmx(vcpu);
3019  struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
3020  struct vmcs_hdr hdr;
3021 
3022  if (vmcs12->vmcs_link_pointer == INVALID_GPA)
3023  return 0;
3024 
3026  return -EINVAL;
3027 
3028  if (ghc->gpa != vmcs12->vmcs_link_pointer &&
3029  CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
3031  return -EINVAL;
3032 
3033  if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
3034  offsetof(struct vmcs12, hdr),
3035  sizeof(hdr))))
3036  return -EINVAL;
3037 
3038  if (CC(hdr.revision_id != VMCS12_REVISION) ||
3039  CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
3040  return -EINVAL;
3041 
3042  return 0;
3043 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_check_vmentry_hw()

static int nested_vmx_check_vmentry_hw ( struct kvm_vcpu *  vcpu)
static

Definition at line 3124 of file nested.c.

3125 {
3126  struct vcpu_vmx *vmx = to_vmx(vcpu);
3127  unsigned long cr3, cr4;
3128  bool vm_fail;
3129 
3130  if (!nested_early_check)
3131  return 0;
3132 
3133  if (vmx->msr_autoload.host.nr)
3134  vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3135  if (vmx->msr_autoload.guest.nr)
3136  vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3137 
3138  preempt_disable();
3139 
3141 
3142  /*
3143  * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3144  * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
3145  * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
3146  * there is no need to preserve other bits or save/restore the field.
3147  */
3148  vmcs_writel(GUEST_RFLAGS, 0);
3149 
3150  cr3 = __get_current_cr3_fast();
3151  if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3152  vmcs_writel(HOST_CR3, cr3);
3153  vmx->loaded_vmcs->host_state.cr3 = cr3;
3154  }
3155 
3156  cr4 = cr4_read_shadow();
3157  if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3158  vmcs_writel(HOST_CR4, cr4);
3159  vmx->loaded_vmcs->host_state.cr4 = cr4;
3160  }
3161 
3162  vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3163  __vmx_vcpu_run_flags(vmx));
3164 
3165  if (vmx->msr_autoload.host.nr)
3166  vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3167  if (vmx->msr_autoload.guest.nr)
3168  vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3169 
3170  if (vm_fail) {
3171  u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3172 
3173  preempt_enable();
3174 
3175  trace_kvm_nested_vmenter_failed(
3176  "early hardware check VM-instruction error: ", error);
3177  WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3178  return 1;
3179  }
3180 
3181  /*
3182  * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3183  */
3184  if (hw_breakpoint_active())
3185  set_debugreg(__this_cpu_read(cpu_dr7), 7);
3186  local_irq_enable();
3187  preempt_enable();
3188 
3189  /*
3190  * A non-failing VMEntry means we somehow entered guest mode with
3191  * an illegal RIP, and that's just the tip of the iceberg. There
3192  * is no telling what memory has been modified or what state has
3193  * been exposed to unknown code. Hitting this all but guarantees
3194  * a (very critical) hardware issue.
3195  */
3196  WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3197  VMX_EXIT_REASONS_FAILED_VMENTRY));
3198 
3199  return 0;
3200 }
struct vmcs_host_state host_state
Definition: vmcs.h:74
struct vmx_msrs host
Definition: vmx.h:296
struct vmx_msrs guest
Definition: vmx.h:295
struct vcpu_vmx::msr_autoload msr_autoload
unsigned long cr3
Definition: vmcs.h:35
unsigned long cr4
Definition: vmcs.h:36
unsigned int nr
Definition: vmx.h:34
static bool __read_mostly nested_early_check
Definition: nested.c:24
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
Definition: vmx.c:944
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
Definition: vmx.c:1282
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, unsigned int flags)
Here is the call graph for this function:

◆ nested_vmx_disable_intercept_for_x2apic_msr()

static void nested_vmx_disable_intercept_for_x2apic_msr ( unsigned long *  msr_bitmap_l1,
unsigned long *  msr_bitmap_l0,
u32  msr,
int  type 
)
static

Definition at line 542 of file nested.c.

545 {
546  if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
547  vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
548 
549  if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
550  vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
551 }
#define MSR_TYPE_R
Definition: vmx.h:19
#define MSR_TYPE_W
Definition: vmx.h:20
Here is the caller graph for this function:

◆ nested_vmx_enter_non_root_mode()

enum nvmx_vmentry_status nested_vmx_enter_non_root_mode ( struct kvm_vcpu *  vcpu,
bool  from_vmentry 
)

Definition at line 3414 of file nested.c.

3429 {
3430  struct vcpu_vmx *vmx = to_vmx(vcpu);
3431  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3432  enum vm_entry_failure_code entry_failure_code;
3433  bool evaluate_pending_interrupts;
3434  union vmx_exit_reason exit_reason = {
3435  .basic = EXIT_REASON_INVALID_STATE,
3436  .failed_vmentry = 1,
3437  };
3438  u32 failed_index;
3439 
3440  trace_kvm_nested_vmenter(kvm_rip_read(vcpu),
3441  vmx->nested.current_vmptr,
3442  vmcs12->guest_rip,
3445  vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT,
3447  vmcs12->guest_cr3,
3448  KVM_ISA_VMX);
3449 
3451 
3452  evaluate_pending_interrupts = exec_controls_get(vmx) &
3453  (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3454  if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3455  evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3456  if (!evaluate_pending_interrupts)
3457  evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu);
3458 
3459  if (!vmx->nested.nested_run_pending ||
3460  !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3461  vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3462  if (kvm_mpx_supported() &&
3463  (!vmx->nested.nested_run_pending ||
3464  !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3465  vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3466 
3467  /*
3468  * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3469  * nested early checks are disabled. In the event of a "late" VM-Fail,
3470  * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3471  * software model to the pre-VMEntry host state. When EPT is disabled,
3472  * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3473  * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3474  * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3475  * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3476  * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3477  * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3478  * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3479  * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3480  * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3481  * path would need to manually save/restore vmcs01.GUEST_CR3.
3482  */
3483  if (!enable_ept && !nested_early_check)
3484  vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3485 
3486  vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3487 
3488  prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3489 
3490  if (from_vmentry) {
3491  if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3492  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3494  }
3495 
3496  if (nested_vmx_check_vmentry_hw(vcpu)) {
3497  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3498  return NVMX_VMENTRY_VMFAIL;
3499  }
3500 
3502  &entry_failure_code)) {
3503  exit_reason.basic = EXIT_REASON_INVALID_STATE;
3504  vmcs12->exit_qualification = entry_failure_code;
3505  goto vmentry_fail_vmexit;
3506  }
3507  }
3508 
3509  enter_guest_mode(vcpu);
3510 
3511  if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3512  exit_reason.basic = EXIT_REASON_INVALID_STATE;
3513  vmcs12->exit_qualification = entry_failure_code;
3514  goto vmentry_fail_vmexit_guest_mode;
3515  }
3516 
3517  if (from_vmentry) {
3518  failed_index = nested_vmx_load_msr(vcpu,
3521  if (failed_index) {
3522  exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3523  vmcs12->exit_qualification = failed_index;
3524  goto vmentry_fail_vmexit_guest_mode;
3525  }
3526  } else {
3527  /*
3528  * The MMU is not initialized to point at the right entities yet and
3529  * "get pages" would need to read data from the guest (i.e. we will
3530  * need to perform gpa to hpa translation). Request a call
3531  * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3532  * have already been set at vmentry time and should not be reset.
3533  */
3534  kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3535  }
3536 
3537  /*
3538  * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
3539  * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
3540  * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
3541  * unconditionally.
3542  */
3543  if (unlikely(evaluate_pending_interrupts))
3544  kvm_make_request(KVM_REQ_EVENT, vcpu);
3545 
3546  /*
3547  * Do not start the preemption timer hrtimer until after we know
3548  * we are successful, so that only nested_vmx_vmexit needs to cancel
3549  * the timer.
3550  */
3551  vmx->nested.preemption_timer_expired = false;
3553  u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3554  vmx_start_preemption_timer(vcpu, timer_value);
3555  }
3556 
3557  /*
3558  * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3559  * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3560  * returned as far as L1 is concerned. It will only return (and set
3561  * the success flag) when L2 exits (see nested_vmx_vmexit()).
3562  */
3563  return NVMX_VMENTRY_SUCCESS;
3564 
3565  /*
3566  * A failed consistency check that leads to a VMExit during L1's
3567  * VMEnter to L2 is a variation of a normal VMexit, as explained in
3568  * 26.7 "VM-entry failures during or after loading guest state".
3569  */
3570 vmentry_fail_vmexit_guest_mode:
3571  if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3572  vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3573  leave_guest_mode(vcpu);
3574 
3575 vmentry_fail_vmexit:
3576  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3577 
3578  if (!from_vmentry)
3579  return NVMX_VMENTRY_VMEXIT;
3580 
3582  vmcs12->vm_exit_reason = exit_reason.full;
3584  vmx->nested.need_vmcs12_to_shadow_sync = true;
3585  return NVMX_VMENTRY_VMEXIT;
3586 }
static void enter_guest_mode(struct kvm_vcpu *vcpu)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static void leave_guest_mode(struct kvm_vcpu *vcpu)
static bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
Definition: lapic.h:226
@ NVMX_VMENTRY_VMFAIL
Definition: nested.h:15
@ NVMX_VMENTRY_KVM_INTERNAL_ERROR
Definition: nested.h:17
@ NVMX_VMENTRY_VMEXIT
Definition: nested.h:16
@ NVMX_VMENTRY_SUCCESS
Definition: nested.h:14
bool preemption_timer_expired
Definition: vmx.h:213
u64 pre_vmenter_debugctl
Definition: vmx.h:225
u64 pre_vmenter_bndcfgs
Definition: vmx.h:226
u16 guest_intr_status
Definition: vmcs12.h:179
#define KVM_ISA_VMX
Definition: trace.h:283
u32 basic
Definition: vmx.h:75
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:4509
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, enum vm_entry_failure_code *entry_failure_code)
Definition: nested.c:2570
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, struct vmcs12 *vmcs12)
Definition: nested.c:2276
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
Definition: nested.c:3232
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enum vm_entry_failure_code *entry_failure_code)
Definition: nested.c:3058
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
Definition: nested.c:3124
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
Definition: nested.c:3406
static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
Definition: nested.c:2148
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, u64 preemption_timeout)
Definition: nested.c:2164
static __always_inline u64 vmcs_read64(unsigned long field)
Definition: vmx_ops.h:169
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
Definition: x86.c:3617
static bool kvm_mpx_supported(void)
Definition: x86.h:361
Here is the caller graph for this function:

◆ nested_vmx_eptp_switching()

static int nested_vmx_eptp_switching ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 5875 of file nested.c.

5877 {
5878  u32 index = kvm_rcx_read(vcpu);
5879  u64 new_eptp;
5880 
5881  if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
5882  return 1;
5883  if (index >= VMFUNC_EPTP_ENTRIES)
5884  return 1;
5885 
5886  if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5887  &new_eptp, index * 8, 8))
5888  return 1;
5889 
5890  /*
5891  * If the (L2) guest does a vmfunc to the currently
5892  * active ept pointer, we don't have to do anything else
5893  */
5894  if (vmcs12->ept_pointer != new_eptp) {
5895  if (!nested_vmx_check_eptp(vcpu, new_eptp))
5896  return 1;
5897 
5898  vmcs12->ept_pointer = new_eptp;
5899  nested_ept_new_eptp(vcpu);
5900 
5902  kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
5903  }
5904 
5905  return 0;
5906 }
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len)
Definition: kvm_main.c:3337
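The EPTP-switching VMFUNC leaf takes the entry number in RCX and selects an 8-byte EPTP stored at offset index * 8 within the page named by vmcs12->eptp_list_address, which is why the code reads 8 bytes at that offset. A small sketch of the address computation (addresses made up):

    #include <stdint.h>
    #include <stdio.h>

    #define VMFUNC_EPTP_ENTRIES 512   /* one 4 KiB page of 8-byte EPTPs */

    int main(void)
    {
            uint64_t eptp_list = 0x00bb2000ull;   /* vmcs12->eptp_list_address, made up */
            uint32_t index = 5;                   /* from RCX                           */

            if (index >= VMFUNC_EPTP_ENTRIES)
                    return 1;                     /* out of range: VMFUNC fails         */

            printf("read new EPTP from %#llx\n",
                   (unsigned long long)(eptp_list + index * 8));
            return 0;
    }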
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_cr()

static bool nested_vmx_exit_handled_cr ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 6058 of file nested.c.

6060 {
6061  unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
6062  int cr = exit_qualification & 15;
6063  int reg;
6064  unsigned long val;
6065 
6066  switch ((exit_qualification >> 4) & 3) {
6067  case 0: /* mov to cr */
6068  reg = (exit_qualification >> 8) & 15;
6069  val = kvm_register_read(vcpu, reg);
6070  switch (cr) {
6071  case 0:
6073  (val ^ vmcs12->cr0_read_shadow))
6074  return true;
6075  break;
6076  case 3:
6077  if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6078  return true;
6079  break;
6080  case 4:
6082  (vmcs12->cr4_read_shadow ^ val))
6083  return true;
6084  break;
6085  case 8:
6086  if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6087  return true;
6088  break;
6089  }
6090  break;
6091  case 2: /* clts */
6092  if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6093  (vmcs12->cr0_read_shadow & X86_CR0_TS))
6094  return true;
6095  break;
6096  case 1: /* mov from cr */
6097  switch (cr) {
6098  case 3:
6100  CPU_BASED_CR3_STORE_EXITING)
6101  return true;
6102  break;
6103  case 8:
6105  CPU_BASED_CR8_STORE_EXITING)
6106  return true;
6107  break;
6108  }
6109  break;
6110  case 3: /* lmsw */
6111  /*
6112  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
6113  * cr0. Other attempted changes are ignored, with no exit.
6114  */
6115  val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
6116  if (vmcs12->cr0_guest_host_mask & 0xe &
6117  (val ^ vmcs12->cr0_read_shadow))
6118  return true;
6119  if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6120  !(vmcs12->cr0_read_shadow & 0x1) &&
6121  (val & 0x1))
6122  return true;
6123  break;
6124  }
6125  return false;
6126 }
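The decoding follows the control-register exit-qualification layout: bits 3:0 give the CR number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits 11:8 the GPR operand for MOV accesses, and bits 31:16 the LMSW source operand (LMSW_SOURCE_DATA_SHIFT is 16). A small decoding sketch with a made-up qualification value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t qual = 0x0000000000000304ull;    /* made-up exit qualification    */

            unsigned int cr     = qual & 0xf;         /* control register number       */
            unsigned int access = (qual >> 4) & 0x3;  /* 0 mov-to, 1 mov-from, 2 clts, 3 lmsw */
            unsigned int reg    = (qual >> 8) & 0xf;  /* GPR operand for MOV accesses  */
            unsigned int lmsw   = (qual >> 16) & 0xffff;

            printf("cr%u access=%u reg=%u lmsw=%#x\n", cr, access, reg, lmsw);
            return 0;
    }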
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_encls()

static bool nested_vmx_exit_handled_encls ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 6128 of file nested.c.

6130 {
6131  u32 encls_leaf;
6132 
6133  if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
6134  !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
6135  return false;
6136 
6137  encls_leaf = kvm_rax_read(vcpu);
6138  if (encls_leaf > 62)
6139  encls_leaf = 63;
6140  return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
6141 }
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:83
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_io()

static bool nested_vmx_exit_handled_io ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 5996 of file nested.c.

5998 {
5999  unsigned long exit_qualification;
6000  unsigned short port;
6001  int size;
6002 
6003  if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
6004  return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
6005 
6006  exit_qualification = vmx_get_exit_qual(vcpu);
6007 
6008  port = exit_qualification >> 16;
6009  size = (exit_qualification & 7) + 1;
6010 
6011  return nested_vmx_check_io_bitmaps(vcpu, port, size);
6012 }
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, int size)
Definition: nested.c:5963
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_msr()

static bool nested_vmx_exit_handled_msr ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
union vmx_exit_reason  exit_reason 
)
static

Definition at line 6020 of file nested.c.

6023 {
6024  u32 msr_index = kvm_rcx_read(vcpu);
6025  gpa_t bitmap;
6026 
6027  if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
6028  return true;
6029 
6030  /*
6031  * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
6032  * for the four combinations of read/write and low/high MSR numbers.
6033  * First we need to figure out which of the four to use:
6034  */
6035  bitmap = vmcs12->msr_bitmap;
6036  if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6037  bitmap += 2048;
6038  if (msr_index >= 0xc0000000) {
6039  msr_index -= 0xc0000000;
6040  bitmap += 1024;
6041  }
6042 
6043  /* Then read the msr_index'th bit from this bitmap: */
6044  if (msr_index < 1024*8) {
6045  unsigned char b;
6046  if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
6047  return true;
6048  return 1 & (b >> (msr_index & 7));
6049  } else
6050  return true; /* let L1 handle the wrong parameter */
6051 }
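Concretely, the 4 KiB MSR bitmap holds four 1024-byte regions: read-low (MSRs 0x0-0x1FFF) at offset 0, read-high (0xC0000000-0xC0001FFF) at 1024, write-low at 2048 and write-high at 3072; within a region, MSR n maps to bit n & 7 of byte n / 8. A small sketch mirroring the offset computation above (bitmap address made up):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t msr_bitmap_byte(uint64_t bitmap, uint32_t msr, int write)
    {
            if (write)
                    bitmap += 2048;           /* write bitmaps are the upper half */
            if (msr >= 0xc0000000) {
                    msr -= 0xc0000000;
                    bitmap += 1024;           /* "high" MSR range                 */
            }
            return bitmap + msr / 8;
    }

    int main(void)
    {
            uint64_t msr_bitmap = 0x00cc3000ull;   /* vmcs12->msr_bitmap, made up */

            printf("%#llx\n", (unsigned long long)
                   msr_bitmap_byte(msr_bitmap, 0xc0000080 /* EFER */, 1));
            return 0;
    }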
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_mtf()

static bool nested_vmx_exit_handled_mtf ( struct vmcs12 vmcs12)
static

Definition at line 6167 of file nested.c.

6168 {
6169  u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
6170 
6172  return true;
6173 
6174  /*
6175  * An MTF VM-exit may be injected into the guest by setting the
6176  * interruption-type to 7 (other event) and the vector field to 0. Such
6177  * is the case regardless of the 'monitor trap flag' VM-execution
6178  * control.
6179  */
6180  return entry_intr_info == (INTR_INFO_VALID_MASK
6181  | INTR_TYPE_OTHER_EVENT);
6182 }
static int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
Definition: nested.h:160
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_exit_handled_vmcs_access()

static bool nested_vmx_exit_handled_vmcs_access ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
gpa_t  bitmap 
)
static

Definition at line 6143 of file nested.c.

6145 {
6146  u32 vmx_instruction_info;
6147  unsigned long field;
6148  u8 b;
6149 
6151  return true;
6152 
6153  /* Decode instruction info and find the field to access */
6154  vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6155  field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
6156 
6157  /* Out-of-range fields always cause a VM exit from L2 to L1 */
6158  if (field >> 15)
6159  return true;
6160 
6161  if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
6162  return true;
6163 
6164  return 1 & (b >> (field & 7));
6165 }
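The VMREAD/VMWRITE bitmaps hold one bit per field encoding: encodings with bit 15 or above set always exit to L1, otherwise byte field / 8 and bit field & 7 of the supplied bitmap decide, as the code above does. A small sketch of the lookup (bitmap address and field encoding made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bitmap = 0x00dd4000ull;       /* vmread or vmwrite bitmap GPA */
            unsigned long field = 0x4002;          /* made-up field encoding       */

            if (field >> 15) {                     /* out of bitmap range          */
                    printf("always exits to L1\n");
                    return 0;
            }
            printf("read byte at %#llx, test bit %lu\n",
                   (unsigned long long)(bitmap + field / 8), field & 7);
            return 0;
    }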
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_fail()

static int nested_vmx_fail ( struct kvm_vcpu *  vcpu,
u32  vm_instruction_error 
)
static

Definition at line 188 of file nested.c.

189 {
190  struct vcpu_vmx *vmx = to_vmx(vcpu);
191 
192  /*
193  * failValid writes the error number to the current VMCS, which
194  * can't be done if there isn't a current VMCS.
195  */
196  if (vmx->nested.current_vmptr == INVALID_GPA &&
197  !nested_vmx_is_evmptr12_valid(vmx))
198  return nested_vmx_failInvalid(vcpu);
199 
200  return nested_vmx_failValid(vcpu, vm_instruction_error);
201 }
static int nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
Definition: nested.c:169
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_failInvalid()

static int nested_vmx_failInvalid ( struct kvm_vcpu *  vcpu)
static

Definition at line 160 of file nested.c.

161 {
162  vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
163  & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
164  X86_EFLAGS_SF | X86_EFLAGS_OF))
165  | X86_EFLAGS_CF);
166  return kvm_skip_emulated_instruction(vcpu);
167 }
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
Definition: vmx.c:1509
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_failValid()

static int nested_vmx_failValid ( struct kvm_vcpu *  vcpu,
u32  vm_instruction_error 
)
static

Definition at line 169 of file nested.c.

171 {
172  vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
173  & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
174  X86_EFLAGS_SF | X86_EFLAGS_OF))
175  | X86_EFLAGS_ZF);
176  get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
177  /*
178  * We don't need to force sync to shadow VMCS because
179  * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
180  * fields and thus must be synced.
181  */
182  if (nested_vmx_is_evmptr12_set(to_vmx(vcpu)))
183  to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
184 
185  return kvm_skip_emulated_instruction(vcpu);
186 }
static bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
Definition: hyperv.h:79
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_free_vcpu()

void nested_vmx_free_vcpu ( struct kvm_vcpu *  vcpu)

Definition at line 372 of file nested.c.

373 {
374  vcpu_load(vcpu);
375  vmx_leave_nested(vcpu);
376  vcpu_put(vcpu);
377 }
void vcpu_put(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:219
void vcpu_load(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:208
void vmx_leave_nested(struct kvm_vcpu *vcpu)
Definition: nested.c:6568
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_get_vmcs01_guest_efer()

static u64 nested_vmx_get_vmcs01_guest_efer ( struct vcpu_vmx vmx)
inlinestatic

Definition at line 4634 of file nested.c.

4635 {
4636  struct vmx_uret_msr *efer_msr;
4637  unsigned int i;
4638 
4639  if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4640  return vmcs_read64(GUEST_IA32_EFER);
4641 
4642  if (cpu_has_load_ia32_efer())
4643  return host_efer;
4644 
4645  for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4646  if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4647  return vmx->msr_autoload.guest.val[i].value;
4648  }
4649 
4650  efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4651  if (efer_msr)
4652  return efer_msr->data;
4653 
4654  return host_efer;
4655 }
static bool cpu_has_load_ia32_efer(void)
Definition: capabilities.h:99
struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS]
Definition: vmx.h:35
u64 data
Definition: vmx.h:40
struct vmx_uret_msr * vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
Definition: vmx.c:713
u64 __read_mostly host_efer
Definition: x86.c:229
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_get_vmexit_msr_value()

static bool nested_vmx_get_vmexit_msr_value ( struct kvm_vcpu *  vcpu,
u32  msr_index,
u64 *  data 
)
static

Definition at line 974 of file nested.c.

977 {
978  struct vcpu_vmx *vmx = to_vmx(vcpu);
979 
980  /*
981  * If the L0 hypervisor stored a more accurate value for the TSC that
982  * does not include the time taken for emulation of the L2->L1
983  * VM-exit in L0, use the more accurate value.
984  */
985  if (msr_index == MSR_IA32_TSC) {
987  MSR_IA32_TSC);
988 
989  if (i >= 0) {
990  u64 val = vmx->msr_autostore.guest.val[i].value;
991 
992  *data = kvm_read_l1_tsc(vcpu, val);
993  return true;
994  }
995  }
996 
997  if (kvm_get_msr(vcpu, msr_index, data)) {
998  pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
999  msr_index);
1000  return false;
1001  }
1002  return true;
1003 }
struct vmx_msrs guest
Definition: vmx.h:300
struct vcpu_vmx::msr_autostore msr_autostore
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
Definition: vmx.c:969
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
Definition: x86.c:1981
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
Definition: x86.c:2583
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_get_vmptr()

static int nested_vmx_get_vmptr ( struct kvm_vcpu *  vcpu,
gpa_t *  vmpointer,
int *  ret 
)
static

Definition at line 5088 of file nested.c.

5090 {
5091  gva_t gva;
5092  struct x86_exception e;
5093  int r;
5094 
5095  if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5096  vmcs_read32(VMX_INSTRUCTION_INFO), false,
5097  sizeof(*vmpointer), &gva)) {
5098  *ret = 1;
5099  return -EINVAL;
5100  }
5101 
5102  r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
5103  if (r != X86EMUL_CONTINUE) {
5104  *ret = kvm_handle_memory_failure(vcpu, r, &e);
5105  return -EINVAL;
5106  }
5107 
5108  return 0;
5109 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_handle_enlightened_vmptrld()

static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld ( struct kvm_vcpu *  vcpu,
bool  from_launch 
)
static

Definition at line 1852 of file nested.c.

2036 {
2037 #ifdef CONFIG_KVM_HYPERV
2038  struct vcpu_vmx *vmx = to_vmx(vcpu);
2039  bool evmcs_gpa_changed = false;
2040  u64 evmcs_gpa;
2041 
2042  if (likely(!guest_cpuid_has_evmcs(vcpu)))
2043  return EVMPTRLD_DISABLED;
2044 
2045  evmcs_gpa = nested_get_evmptr(vcpu);
2046  if (!evmptr_is_valid(evmcs_gpa)) {
2048  return EVMPTRLD_DISABLED;
2049  }
2050 
2051  if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2052  vmx->nested.current_vmptr = INVALID_GPA;
2053 
2055 
2056  if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
2057  &vmx->nested.hv_evmcs_map))
2058  return EVMPTRLD_ERROR;
2059 
2060  vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2061 
2062  /*
2063  * Currently, KVM only supports eVMCS version 1
2064  * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
2065  * value to first u32 field of eVMCS which should specify eVMCS
2066  * VersionNumber.
2067  *
2068  * Guest should be aware of supported eVMCS versions by host by
2069  * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
2070  * expected to set this CPUID leaf according to the value
2071  * returned in vmcs_version from nested_enable_evmcs().
2072  *
2073  * However, it turns out that Microsoft Hyper-V fails to comply
2074  * with its own invented interface: when Hyper-V uses eVMCS, it
2075  * just sets the first u32 field of the eVMCS to the revision_id
2076  * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
2077  * number, which is one of the supported versions specified in
2078  * CPUID.0x4000000A.EAX[0:15].
2079  *
2080  * To overcome Hyper-V bug, we accept here either a supported
2081  * eVMCS version or VMCS12 revision_id as valid values for first
2082  * u32 field of eVMCS.
2083  */
2084  if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2085  (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
 2086  nested_release_evmcs(vcpu);
 2087  return EVMPTRLD_VMFAIL;
2088  }
2089 
2090  vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2091 
2092  evmcs_gpa_changed = true;
2093  /*
2094  * Unlike normal vmcs12, enlightened vmcs12 is not fully
2095  * reloaded from guest's memory (read only fields, fields not
2096  * present in struct hv_enlightened_vmcs, ...). Make sure there
2097  * are no leftovers.
2098  */
2099  if (from_launch) {
2100  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2101  memset(vmcs12, 0, sizeof(*vmcs12));
 2102  vmcs12->hdr.revision_id = VMCS12_REVISION;
 2103  }
2104 
2105  }
2106 
2107  /*
2108  * Clean fields data can't be used on VMLAUNCH and when we switch
2109  * between different L2 guests as KVM keeps a single VMCS12 per L1.
2110  */
2111  if (from_launch || evmcs_gpa_changed) {
2112  vmx->nested.hv_evmcs->hv_clean_fields &=
2113  ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2114 
2115  vmx->nested.force_msr_bitmap_recalc = true;
2116  }
2117 
2118  return EVMPTRLD_SUCCEEDED;
2119 #else
2120  return EVMPTRLD_DISABLED;
2121 #endif
2122 }
#define KVM_EVMCS_VERSION
Definition: hyperv_evmcs.h:14
bool force_msr_bitmap_recalc
Definition: vmx.h:161
struct vmcs_hdr hdr
Definition: vmcs12.h:31
u32 revision_id
Definition: vmcs.h:17
@ EVMPTRLD_ERROR
Definition: hyperv.h:16
@ EVMPTRLD_DISABLED
Definition: hyperv.h:13
@ EVMPTRLD_VMFAIL
Definition: hyperv.h:15
@ EVMPTRLD_SUCCEEDED
Definition: hyperv.h:14
Here is the caller graph for this function:
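The revision-id quirk documented in the comment above boils down to accepting two different values in the first u32 of the enlightened VMCS. A hedged, standalone restatement of just that check (the parameters stand in for KVM_EVMCS_VERSION and VMCS12_REVISION; this is not the kernel code itself):

#include <stdbool.h>
#include <stdint.h>

/* Accept either the supported eVMCS version (KVM supports version 1) or the
 * VMCS12 revision id that Hyper-V writes, per the workaround described above. */
static bool evmcs_first_u32_is_acceptable(uint32_t first_u32,
					  uint32_t supported_evmcs_version,
					  uint32_t vmcs12_revision)
{
	return first_u32 == supported_evmcs_version ||
	       first_u32 == vmcs12_revision;
}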

◆ nested_vmx_hardware_setup()

__init int nested_vmx_hardware_setup ( int(*[])(struct kvm_vcpu *)  exit_handlers)

Definition at line 7092 of file nested.c.

7093 {
7094  int i;
7095 
7096  if (!cpu_has_vmx_shadow_vmcs())
7097  enable_shadow_vmcs = 0;
7098  if (enable_shadow_vmcs) {
7099  for (i = 0; i < VMX_BITMAP_NR; i++) {
7100  /*
7101  * The vmx_bitmap is not tied to a VM and so should
7102  * not be charged to a memcg.
7103  */
7104  vmx_bitmap[i] = (unsigned long *)
7105  __get_free_page(GFP_KERNEL);
7106  if (!vmx_bitmap[i]) {
 7107  nested_vmx_hardware_unsetup();
 7108  return -ENOMEM;
7109  }
7110  }
7111 
 7112  init_vmcs_shadow_fields();
 7113  }
7114 
7115  exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
7116  exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
7117  exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
7118  exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
7119  exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
7120  exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
7121  exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
7122  exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff;
7123  exit_handlers[EXIT_REASON_VMON] = handle_vmxon;
7124  exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
7125  exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
7126  exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
7127 
7128  return 0;
7129 }
static bool cpu_has_vmx_shadow_vmcs(void)
Definition: capabilities.h:225
static int handle_vmwrite(struct kvm_vcpu *vcpu)
Definition: nested.c:5483
static int handle_vmptrst(struct kvm_vcpu *vcpu)
Definition: nested.c:5674
static int handle_vmptrld(struct kvm_vcpu *vcpu)
Definition: nested.c:5604
static int handle_vmresume(struct kvm_vcpu *vcpu)
Definition: nested.c:5371
static int handle_vmread(struct kvm_vcpu *vcpu)
Definition: nested.c:5377
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
Definition: nested.c:5365
static int handle_vmxoff(struct kvm_vcpu *vcpu)
Definition: nested.c:5309
static int handle_invept(struct kvm_vcpu *vcpu)
Definition: nested.c:5702
void nested_vmx_hardware_unsetup(void)
Definition: nested.c:7082
static int handle_invvpid(struct kvm_vcpu *vcpu)
Definition: nested.c:5782
static unsigned long * vmx_bitmap[VMX_BITMAP_NR]
Definition: nested.c:46
static int handle_vmxon(struct kvm_vcpu *vcpu)
Definition: nested.c:5190
static int handle_vmclear(struct kvm_vcpu *vcpu)
Definition: nested.c:5323
static int handle_vmfunc(struct kvm_vcpu *vcpu)
Definition: nested.c:5908
static void init_vmcs_shadow_fields(void)
Definition: nested.c:69
Here is the call graph for this function:
Here is the caller graph for this function:
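The setup routine above only fills slots in an exit-handler table; dispatch happens elsewhere by indexing that table with the basic exit reason. A toy, self-contained model of that pattern (the vcpu type, the reason count and the handler bodies are made up for illustration; only the table-indexing idea mirrors the real code):

#include <stdio.h>

struct toy_vcpu { int id; };

static int toy_handle_vmclear(struct toy_vcpu *v)
{
	printf("VMCLEAR emulated for vcpu %d\n", v->id);
	return 1;			/* 1 == continue running the guest */
}

static int toy_handle_unexpected(struct toy_vcpu *v)
{
	(void)v;
	return 0;			/* 0 == exit to userspace */
}

#define TOY_EXIT_REASON_VMCLEAR	19	/* SDM basic exit reason for VMCLEAR */
#define TOY_NR_EXIT_REASONS	75

static int (*toy_exit_handlers[TOY_NR_EXIT_REASONS])(struct toy_vcpu *);

int main(void)
{
	struct toy_vcpu vcpu = { .id = 0 };

	for (int i = 0; i < TOY_NR_EXIT_REASONS; i++)
		toy_exit_handlers[i] = toy_handle_unexpected;
	toy_exit_handlers[TOY_EXIT_REASON_VMCLEAR] = toy_handle_vmclear;

	return toy_exit_handlers[TOY_EXIT_REASON_VMCLEAR](&vcpu) ? 0 : 1;
}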

◆ nested_vmx_hardware_unsetup()

void nested_vmx_hardware_unsetup ( void  )

Definition at line 7082 of file nested.c.

7083 {
7084  int i;
7085 
7086  if (enable_shadow_vmcs) {
7087  for (i = 0; i < VMX_BITMAP_NR; i++)
7088  free_page((unsigned long)vmx_bitmap[i]);
7089  }
7090 }
Here is the caller graph for this function:

◆ nested_vmx_inject_exception_vmexit()

static void nested_vmx_inject_exception_vmexit ( struct kvm_vcpu *  vcpu)
static

Definition at line 3901 of file nested.c.

3902 {
3903  struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
3904  u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
3905  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3906  unsigned long exit_qual;
3907 
3908  if (ex->has_payload) {
3909  exit_qual = ex->payload;
3910  } else if (ex->vector == PF_VECTOR) {
3911  exit_qual = vcpu->arch.cr2;
3912  } else if (ex->vector == DB_VECTOR) {
3913  exit_qual = vcpu->arch.dr6;
3914  exit_qual &= ~DR6_BT;
3915  exit_qual ^= DR6_ACTIVE_LOW;
3916  } else {
3917  exit_qual = 0;
3918  }
3919 
3920  /*
3921  * Unlike AMD's Paged Real Mode, which reports an error code on #PF
3922  * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
3923  * "has error code" flags on VM-Exit if the CPU is in Real Mode.
3924  */
3925  if (ex->has_error_code && is_protmode(vcpu)) {
3926  /*
3927  * Intel CPUs do not generate error codes with bits 31:16 set,
3928  * and more importantly VMX disallows setting bits 31:16 in the
3929  * injected error code for VM-Entry. Drop the bits to mimic
3930  * hardware and avoid inducing failure on nested VM-Entry if L1
3931  * chooses to inject the exception back to L2. AMD CPUs _do_
3932  * generate "full" 32-bit error codes, so KVM allows userspace
3933  * to inject exception error codes with bits 31:16 set.
3934  */
3935  vmcs12->vm_exit_intr_error_code = (u16)ex->error_code;
3936  intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3937  }
3938 
3939  if (kvm_exception_is_soft(ex->vector))
3940  intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3941  else
3942  intr_info |= INTR_TYPE_HARD_EXCEPTION;
3943 
3944  if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3945  vmx_get_nmi_mask(vcpu))
3946  intr_info |= INTR_INFO_UNBLOCK_NMI;
3947 
3948  nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3949 }
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
Definition: vmx.c:4991
static bool is_protmode(struct kvm_vcpu *vcpu)
Definition: x86.h:138
static bool kvm_exception_is_soft(unsigned int nr)
Definition: x86.h:133
Here is the call graph for this function:
Here is the caller graph for this function:
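The interruption-information word assembled above follows the fixed VM-exit layout: vector in bits 7:0, type in bits 10:8, the error-code flag in bit 11 and the valid flag in bit 31. A small standalone sketch of that assembly (constants are written out here instead of taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>

#define TOY_INTR_TYPE_HARD_EXCEPTION	(3u << 8)
#define TOY_INTR_TYPE_SOFT_EXCEPTION	(6u << 8)
#define TOY_INTR_INFO_DELIVER_CODE	(1u << 11)
#define TOY_INTR_INFO_VALID		(1u << 31)

static uint32_t toy_exception_intr_info(uint8_t vector, bool is_soft,
					bool deliver_error_code)
{
	uint32_t info = vector | TOY_INTR_INFO_VALID;

	info |= is_soft ? TOY_INTR_TYPE_SOFT_EXCEPTION
			: TOY_INTR_TYPE_HARD_EXCEPTION;
	if (deliver_error_code)
		info |= TOY_INTR_INFO_DELIVER_CODE;
	return info;
}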

◆ nested_vmx_is_exception_vmexit()

static bool nested_vmx_is_exception_vmexit ( struct kvm_vcpu *  vcpu,
u8  vector,
u32  error_code 
)
static

Definition at line 482 of file nested.c.

484 {
485  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
486 
487  /*
488  * Drop bits 31:16 of the error code when performing the #PF mask+match
489  * check. All VMCS fields involved are 32 bits, but Intel CPUs never
490  * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
491  * error code. Including the to-be-dropped bits in the check might
492  * result in an "impossible" or missed exit from L1's perspective.
493  */
494  if (vector == PF_VECTOR)
495  return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);
496 
497  return (vmcs12->exception_bitmap & (1u << vector));
498 }
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code)
Definition: nested.c:470
Here is the call graph for this function:

◆ nested_vmx_is_page_fault_vmexit()

static bool nested_vmx_is_page_fault_vmexit ( struct vmcs12 vmcs12,
u16  error_code 
)
static

Definition at line 470 of file nested.c.

472 {
473  bool inequality, bit;
474 
475  bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
476  inequality =
477  (error_code & vmcs12->page_fault_error_code_mask) !=
 478  vmcs12->page_fault_error_code_match;
 479  return inequality ^ bit;
480 }
u32 page_fault_error_code_mask
Definition: vmcs12.h:124
u32 page_fault_error_code_match
Definition: vmcs12.h:125
Here is the caller graph for this function:
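The mask/match rule above is easiest to see with a standalone example: the error code is ANDed with PFEC_MASK and compared against PFEC_MATCH, and the result is XORed with the #PF bit of the exception bitmap, so L1 can request exits either for matching or for non-matching page faults. A sketch under those assumptions (PF_VECTOR is 14):

#include <stdbool.h>
#include <stdint.h>

static bool toy_pf_is_l1_vmexit(uint32_t exception_bitmap, uint32_t pfec_mask,
				uint32_t pfec_match, uint16_t error_code)
{
	bool bit = (exception_bitmap & (1u << 14)) != 0;	/* #PF bit */
	bool inequality = (error_code & pfec_mask) != pfec_match;

	return inequality ^ bit;
}

/* With mask == match == 0 every error code "matches", so the exception
 * bitmap bit alone decides whether the fault is reflected to L1. */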

◆ nested_vmx_l0_wants_exit()

static bool nested_vmx_l0_wants_exit ( struct kvm_vcpu *  vcpu,
union vmx_exit_reason  exit_reason 
)
static

Definition at line 6188 of file nested.c.

6190 {
6191  u32 intr_info;
6192 
6193  switch ((u16)exit_reason.basic) {
6194  case EXIT_REASON_EXCEPTION_NMI:
6195  intr_info = vmx_get_intr_info(vcpu);
6196  if (is_nmi(intr_info))
6197  return true;
6198  else if (is_page_fault(intr_info))
6199  return vcpu->arch.apf.host_apf_flags ||
6200  vmx_need_pf_intercept(vcpu);
6201  else if (is_debug(intr_info) &&
6202  vcpu->guest_debug &
6203  (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
6204  return true;
6205  else if (is_breakpoint(intr_info) &&
6206  vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
6207  return true;
6208  else if (is_alignment_check(intr_info) &&
6209  !vmx_guest_inject_ac(vcpu))
6210  return true;
6211  return false;
6212  case EXIT_REASON_EXTERNAL_INTERRUPT:
6213  return true;
6214  case EXIT_REASON_MCE_DURING_VMENTRY:
6215  return true;
6216  case EXIT_REASON_EPT_VIOLATION:
6217  /*
6218  * L0 always deals with the EPT violation. If nested EPT is
6219  * used, and the nested mmu code discovers that the address is
6220  * missing in the guest EPT table (EPT12), the EPT violation
6221  * will be injected with nested_ept_inject_page_fault()
6222  */
6223  return true;
6224  case EXIT_REASON_EPT_MISCONFIG:
6225  /*
6226  * L2 never uses directly L1's EPT, but rather L0's own EPT
6227  * table (shadow on EPT) or a merged EPT table that L0 built
6228  * (EPT on EPT). So any problems with the structure of the
6229  * table is L0's fault.
6230  */
6231  return true;
6232  case EXIT_REASON_PREEMPTION_TIMER:
6233  return true;
6234  case EXIT_REASON_PML_FULL:
6235  /*
6236  * PML is emulated for an L1 VMM and should never be enabled in
6237  * vmcs02, always "handle" PML_FULL by exiting to userspace.
6238  */
6239  return true;
6240  case EXIT_REASON_VMFUNC:
6241  /* VM functions are emulated through L2->L0 vmexits. */
6242  return true;
6243  case EXIT_REASON_BUS_LOCK:
6244  /*
6245  * At present, bus lock VM exit is never exposed to L1.
6246  * Handle L2's bus locks in L0 directly.
6247  */
6248  return true;
6249 #ifdef CONFIG_KVM_HYPERV
6250  case EXIT_REASON_VMCALL:
6251  /* Hyper-V L2 TLB flush hypercall is handled by L0 */
6252  return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
 6253  nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
 6254  kvm_hv_is_tlb_flush_hcall(vcpu);
 6255 #endif
6256  default:
6257  break;
6258  }
6259  return false;
6260 }
static bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
Definition: hyperv.h:308
static bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
Definition: hyperv.h:312
static __always_inline bool is_nmi(u32 intr_info)
Definition: vmcs.h:149
static bool is_page_fault(u32 intr_info)
Definition: vmcs.h:113
static bool is_alignment_check(u32 intr_info)
Definition: vmcs.h:128
static bool is_breakpoint(u32 intr_info)
Definition: vmcs.h:103
static bool is_debug(u32 intr_info)
Definition: vmcs.h:98
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
Definition: hyperv.c:211
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
Definition: vmx.c:5174
static bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
Definition: vmx.h:719
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_l1_wants_exit()

static bool nested_vmx_l1_wants_exit ( struct kvm_vcpu *  vcpu,
union vmx_exit_reason  exit_reason 
)
static

Definition at line 6266 of file nested.c.

6268 {
6269  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6270  u32 intr_info;
6271 
6272  switch ((u16)exit_reason.basic) {
6273  case EXIT_REASON_EXCEPTION_NMI:
6274  intr_info = vmx_get_intr_info(vcpu);
6275  if (is_nmi(intr_info))
6276  return true;
6277  else if (is_page_fault(intr_info))
6278  return true;
6279  return vmcs12->exception_bitmap &
6280  (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6281  case EXIT_REASON_EXTERNAL_INTERRUPT:
6282  return nested_exit_on_intr(vcpu);
6283  case EXIT_REASON_TRIPLE_FAULT:
6284  return true;
6285  case EXIT_REASON_INTERRUPT_WINDOW:
6286  return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
6287  case EXIT_REASON_NMI_WINDOW:
6288  return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
6289  case EXIT_REASON_TASK_SWITCH:
6290  return true;
6291  case EXIT_REASON_CPUID:
6292  return true;
6293  case EXIT_REASON_HLT:
6294  return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6295  case EXIT_REASON_INVD:
6296  return true;
6297  case EXIT_REASON_INVLPG:
6298  return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6299  case EXIT_REASON_RDPMC:
6300  return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6301  case EXIT_REASON_RDRAND:
6302  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6303  case EXIT_REASON_RDSEED:
6304  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6305  case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6306  return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6307  case EXIT_REASON_VMREAD:
 6308  return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
 6309  vmcs12->vmread_bitmap);
 6310  case EXIT_REASON_VMWRITE:
 6311  return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
 6312  vmcs12->vmwrite_bitmap);
6313  case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6314  case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6315  case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6316  case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6317  case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6318  /*
6319  * VMX instructions trap unconditionally. This allows L1 to
6320  * emulate them for its L2 guest, i.e., allows 3-level nesting!
6321  */
6322  return true;
6323  case EXIT_REASON_CR_ACCESS:
6324  return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6325  case EXIT_REASON_DR_ACCESS:
6326  return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6327  case EXIT_REASON_IO_INSTRUCTION:
6328  return nested_vmx_exit_handled_io(vcpu, vmcs12);
6329  case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6330  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6331  case EXIT_REASON_MSR_READ:
6332  case EXIT_REASON_MSR_WRITE:
6333  return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6334  case EXIT_REASON_INVALID_STATE:
6335  return true;
6336  case EXIT_REASON_MWAIT_INSTRUCTION:
6337  return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6338  case EXIT_REASON_MONITOR_TRAP_FLAG:
 6339  return nested_vmx_exit_handled_mtf(vmcs12);
 6340  case EXIT_REASON_MONITOR_INSTRUCTION:
6341  return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6342  case EXIT_REASON_PAUSE_INSTRUCTION:
6343  return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
 6344  nested_cpu_has2(vmcs12,
 6345  SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6346  case EXIT_REASON_MCE_DURING_VMENTRY:
6347  return true;
6348  case EXIT_REASON_TPR_BELOW_THRESHOLD:
6349  return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6350  case EXIT_REASON_APIC_ACCESS:
6351  case EXIT_REASON_APIC_WRITE:
6352  case EXIT_REASON_EOI_INDUCED:
6353  /*
6354  * The controls for "virtualize APIC accesses," "APIC-
6355  * register virtualization," and "virtual-interrupt
6356  * delivery" only come from vmcs12.
6357  */
6358  return true;
6359  case EXIT_REASON_INVPCID:
6360  return
6361  nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6362  nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6363  case EXIT_REASON_WBINVD:
6364  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6365  case EXIT_REASON_XSETBV:
6366  return true;
6367  case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6368  /*
6369  * This should never happen, since it is not possible to
6370  * set XSS to a non-zero value---neither in L1 nor in L2.
6371  * If if it were, XSS would have to be checked against
6372  * the XSS exit bitmap in vmcs12.
6373  */
6374  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
6375  case EXIT_REASON_UMWAIT:
6376  case EXIT_REASON_TPAUSE:
6377  return nested_cpu_has2(vmcs12,
6378  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6379  case EXIT_REASON_ENCLS:
6380  return nested_vmx_exit_handled_encls(vcpu, vmcs12);
6381  case EXIT_REASON_NOTIFY:
6382  /* Notify VM exit is not exposed to L1 */
6383  return false;
6384  default:
6385  return true;
6386  }
6387 }
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:6058
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, union vmx_exit_reason exit_reason)
Definition: nested.c:6020
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:5996
static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:6128
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
Definition: nested.c:6167
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap)
Definition: nested.c:6143
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_load_cr3()

static int nested_vmx_load_cr3 ( struct kvm_vcpu *  vcpu,
unsigned long  cr3,
bool  nested_ept,
bool  reload_pdptrs,
enum vm_entry_failure_code *  entry_failure_code 
)
static

Definition at line 1115 of file nested.c.

1118 {
1119  if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
1120  *entry_failure_code = ENTRY_FAIL_DEFAULT;
1121  return -EINVAL;
1122  }
1123 
1124  /*
1125  * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1126  * must not be dereferenced.
1127  */
1128  if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
1129  CC(!load_pdptrs(vcpu, cr3))) {
1130  *entry_failure_code = ENTRY_FAIL_PDPTE;
1131  return -EINVAL;
1132  }
1133 
1134  vcpu->arch.cr3 = cr3;
1135  kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1136 
1137  /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
1138  kvm_init_mmu(vcpu);
1139 
1140  if (!nested_ept)
1141  kvm_mmu_new_pgd(vcpu, cr3);
1142 
1143  return 0;
1144 }
static void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg)
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
Definition: mmu.c:4753
void kvm_init_mmu(struct kvm_vcpu *vcpu)
Definition: mmu.c:5538
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_load_msr()

static u32 nested_vmx_load_msr ( struct kvm_vcpu *  vcpu,
u64  gpa,
u32  count 
)
static

Definition at line 938 of file nested.c.

939 {
940  u32 i;
941  struct vmx_msr_entry e;
942  u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
943 
944  for (i = 0; i < count; i++) {
945  if (unlikely(i >= max_msr_list_size))
946  goto fail;
947 
948  if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
949  &e, sizeof(e))) {
950  pr_debug_ratelimited(
951  "%s cannot read MSR entry (%u, 0x%08llx)\n",
952  __func__, i, gpa + i * sizeof(e));
953  goto fail;
954  }
955  if (nested_vmx_load_msr_check(vcpu, &e)) {
956  pr_debug_ratelimited(
957  "%s check failed (%u, 0x%x, 0x%x)\n",
958  __func__, i, e.index, e.reserved);
959  goto fail;
960  }
961  if (kvm_set_msr(vcpu, e.index, e.value)) {
962  pr_debug_ratelimited(
963  "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
964  __func__, i, e.index, e.value);
965  goto fail;
966  }
967  }
968  return 0;
969 fail:
970  /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
971  return i + 1;
972 }
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
Definition: nested.c:900
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
Definition: nested.c:920
Here is the call graph for this function:
Here is the caller graph for this function:
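The return convention above is worth spelling out: zero means the whole VM-entry MSR-load list was processed, and a non-zero value is the 1-based index of the entry that failed, which ultimately lands in the exit qualification of the failed nested VM-entry. A hypothetical caller-side sketch of the same contract (the types and the wrmsr callback are stand-ins):

#include <stdint.h>

struct toy_msr_entry {
	uint32_t index;
	uint32_t reserved;	/* must be zero */
	uint64_t value;
};

static uint32_t toy_load_msr_list(const struct toy_msr_entry *e, uint32_t count,
				  int (*wrmsr)(uint32_t index, uint64_t value))
{
	for (uint32_t i = 0; i < count; i++) {
		if (e[i].reserved != 0 || wrmsr(e[i].index, e[i].value))
			return i + 1;	/* 1-based index of the failing entry */
	}
	return 0;			/* whole list loaded */
}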

◆ nested_vmx_load_msr_check()

static int nested_vmx_load_msr_check ( struct kvm_vcpu *  vcpu,
struct vmx_msr_entry *  e 
)
static

Definition at line 900 of file nested.c.

902 {
903  if (CC(e->index == MSR_FS_BASE) ||
904  CC(e->index == MSR_GS_BASE) ||
905  CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
 906  nested_vmx_msr_check_common(vcpu, e))
 907  return -EINVAL;
908  return 0;
909 }
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
Definition: nested.c:886
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_max_atomic_switch_msrs()

static u32 nested_vmx_max_atomic_switch_msrs ( struct kvm_vcpu *  vcpu)
static

Definition at line 920 of file nested.c.

921 {
922  struct vcpu_vmx *vmx = to_vmx(vcpu);
923  u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
924  vmx->nested.msrs.misc_high);
925 
926  return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
927 }
static u64 vmx_control_msr(u32 low, u32 high)
Definition: nested.c:215
Here is the call graph for this function:
Here is the caller graph for this function:
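The limit computed above comes straight from the architectural encoding: bits 27:25 of IA32_VMX_MISC hold N, and the recommended maximum MSR-list length is (N + 1) * 512, so a field value of 0 allows 512 entries and a hypothetical value of 2 would allow 1536. A standalone restatement of that arithmetic:

#include <stdint.h>

#define TOY_MSR_LIST_MULTIPLIER	512	/* VMX_MISC_MSR_LIST_MULTIPLIER */

static uint32_t toy_max_atomic_switch_msrs(uint64_t vmx_misc)
{
	uint32_t n = (uint32_t)((vmx_misc >> 25) & 0x7);	/* bits 27:25 */

	return (n + 1) * TOY_MSR_LIST_MULTIPLIER;
}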

◆ nested_vmx_msr_check_common()

static int nested_vmx_msr_check_common ( struct kvm_vcpu *  vcpu,
struct vmx_msr_entry *  e 
)
static

Definition at line 886 of file nested.c.

888 {
889  /* x2APIC MSR accesses are not allowed */
890  if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
891  return -EINVAL;
892  if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
893  CC(e->index == MSR_IA32_UCODE_REV))
894  return -EINVAL;
895  if (CC(e->reserved != 0))
896  return -EINVAL;
897  return 0;
898 }
Here is the caller graph for this function:
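The check e->index >> 8 == 0x8 above is a compact range test for the x2APIC register space, which occupies MSRs 0x800 through 0x8ff. A one-line standalone equivalent, for clarity:

#include <stdbool.h>
#include <stdint.h>

static bool toy_is_x2apic_msr(uint32_t index)
{
	return (index >> 8) == 0x8;	/* true exactly for 0x800..0x8ff */
}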

◆ nested_vmx_preemption_timer_pending()

static bool nested_vmx_preemption_timer_pending ( struct kvm_vcpu *  vcpu)
static

Definition at line 4003 of file nested.c.

4004 {
 4005  return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
 4006  to_vmx(vcpu)->nested.preemption_timer_expired;
 4007 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_prepare_msr_bitmap()

static bool nested_vmx_prepare_msr_bitmap ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
inline static

Definition at line 597 of file nested.c.

599 {
600  struct vcpu_vmx *vmx = to_vmx(vcpu);
601  int msr;
602  unsigned long *msr_bitmap_l1;
603  unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
604  struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
605 
606  /* Nothing to do if the MSR bitmap is not in use. */
607  if (!cpu_has_vmx_msr_bitmap() ||
608  !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
609  return false;
610 
611  /*
612  * MSR bitmap update can be skipped when:
613  * - MSR bitmap for L1 hasn't changed.
614  * - Nested hypervisor (L1) is attempting to launch the same L2 as
615  * before.
616  * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
617  * and tells KVM (L0) there were no changes in MSR bitmap for L2.
618  */
619  if (!vmx->nested.force_msr_bitmap_recalc) {
620  struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
621 
622  if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap &&
623  evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
624  return true;
625  }
626 
627  if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
628  return false;
629 
630  msr_bitmap_l1 = (unsigned long *)map->hva;
631 
632  /*
633  * To keep the control flow simple, pay eight 8-byte writes (sixteen
634  * 4-byte writes on 32-bit systems) up front to enable intercepts for
635  * the x2APIC MSR range and selectively toggle those relevant to L2.
636  */
637  enable_x2apic_msr_intercepts(msr_bitmap_l0);
638 
 639  if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
 640  if (nested_cpu_has_apic_reg_virt(vmcs12)) {
 641  /*
642  * L0 need not intercept reads for MSRs between 0x800
643  * and 0x8ff, it just lets the processor take the value
644  * from the virtual-APIC page; take those 256 bits
645  * directly from the L1 bitmap.
646  */
647  for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
648  unsigned word = msr / BITS_PER_LONG;
649 
650  msr_bitmap_l0[word] = msr_bitmap_l1[word];
651  }
652  }
653 
 654  nested_vmx_disable_intercept_for_x2apic_msr(
 655  msr_bitmap_l1, msr_bitmap_l0,
656  X2APIC_MSR(APIC_TASKPRI),
 657  MSR_TYPE_R | MSR_TYPE_W);
 658 
659  if (nested_cpu_has_vid(vmcs12)) {
 660  nested_vmx_disable_intercept_for_x2apic_msr(
 661  msr_bitmap_l1, msr_bitmap_l0,
662  X2APIC_MSR(APIC_EOI),
663  MSR_TYPE_W);
 664  nested_vmx_disable_intercept_for_x2apic_msr(
 665  msr_bitmap_l1, msr_bitmap_l0,
666  X2APIC_MSR(APIC_SELF_IPI),
667  MSR_TYPE_W);
668  }
669  }
670 
671  /*
672  * Always check vmcs01's bitmap to honor userspace MSR filters and any
673  * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
674  */
675 #ifdef CONFIG_X86_64
676  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
677  MSR_FS_BASE, MSR_TYPE_RW);
678 
679  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
680  MSR_GS_BASE, MSR_TYPE_RW);
681 
682  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
683  MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
684 #endif
685  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
686  MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
687 
688  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
689  MSR_IA32_PRED_CMD, MSR_TYPE_W);
690 
691  nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
692  MSR_IA32_FLUSH_CMD, MSR_TYPE_W);
693 
694  kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
695 
696  vmx->nested.force_msr_bitmap_recalc = false;
697 
698  return true;
699 }
static bool cpu_has_vmx_msr_bitmap(void)
Definition: capabilities.h:124
unsigned long * msr_bitmap
Definition: vmcs.h:72
struct kvm_host_map msr_bitmap_map
Definition: vmx.h:204
#define X2APIC_MSR(x)
Definition: svm.c:81
static void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx, unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int types)
Definition: nested.c:580
static void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
Definition: nested.c:553
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_l0, u32 msr, int type)
Definition: nested.c:542
#define MSR_TYPE_RW
Definition: vmx.h:21
Here is the call graph for this function:
Here is the caller graph for this function:
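Conceptually, the merge above leaves an MSR intercepted for L2 unless both L1's bitmap and KVM's own vmcs01 bitmap allow the access; for a plain intercept bit-vector that is just a bitwise OR. The real code walks specific MSRs and honors the enlightened clean-fields optimization, so the sketch below only captures the idea (the names and the whole-word loop are illustrative):

#include <stddef.h>

static void toy_merge_msr_bitmaps(unsigned long *l02, const unsigned long *l1,
				  const unsigned long *l01, size_t nwords)
{
	for (size_t i = 0; i < nwords; i++)
		l02[i] = l1[i] | l01[i];	/* set bit == intercept */
}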

◆ nested_vmx_reflect_vmexit()

bool nested_vmx_reflect_vmexit ( struct kvm_vcpu *  vcpu)

Definition at line 6393 of file nested.c.

6394 {
6395  struct vcpu_vmx *vmx = to_vmx(vcpu);
6396  union vmx_exit_reason exit_reason = vmx->exit_reason;
6397  unsigned long exit_qual;
6398  u32 exit_intr_info;
6399 
6400  WARN_ON_ONCE(vmx->nested.nested_run_pending);
6401 
6402  /*
6403  * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6404  * has already loaded L2's state.
6405  */
6406  if (unlikely(vmx->fail)) {
6407  trace_kvm_nested_vmenter_failed(
6408  "hardware VM-instruction error: ",
6409  vmcs_read32(VM_INSTRUCTION_ERROR));
6410  exit_intr_info = 0;
6411  exit_qual = 0;
6412  goto reflect_vmexit;
6413  }
6414 
6415  trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
6416 
6417  /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6418  if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6419  return false;
6420 
6421  /* If L1 doesn't want the exit, handle it in L0. */
6422  if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6423  return false;
6424 
6425  /*
6426  * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6427  * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6428  * need to be synthesized by querying the in-kernel LAPIC, but external
6429  * interrupts are never reflected to L1 so it's a non-issue.
6430  */
6431  exit_intr_info = vmx_get_intr_info(vcpu);
6432  if (is_exception_with_error_code(exit_intr_info)) {
6433  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6434 
 6435  vmcs12->vm_exit_intr_error_code =
 6436  vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6437  }
6438  exit_qual = vmx_get_exit_qual(vcpu);
6439 
6440 reflect_vmexit:
6441  nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6442  return true;
6443 }
u8 fail
Definition: vmx.h:253
static bool is_exception_with_error_code(u32 intr_info)
Definition: vmcs.h:159
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
Definition: nested.c:6266
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, union vmx_exit_reason exit_reason)
Definition: nested.c:6188
Here is the call graph for this function:
Here is the caller graph for this function:
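The routing policy above reduces to two questions asked in order: does L0 need the exit for itself, and if not, did L1 ask for it. A condensed standalone model of that decision (the two booleans stand in for nested_vmx_l0_wants_exit() and nested_vmx_l1_wants_exit()):

#include <stdbool.h>

static bool toy_should_reflect_to_l1(bool l0_wants_exit, bool l1_wants_exit)
{
	if (l0_wants_exit)
		return false;		/* L0 handles it; L1 never sees it */
	return l1_wants_exit;		/* otherwise reflect iff L1 enabled it */
}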

◆ nested_vmx_restore_host_state()

static void nested_vmx_restore_host_state ( struct kvm_vcpu *  vcpu)
static

Definition at line 4657 of file nested.c.

4658 {
4659  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4660  struct vcpu_vmx *vmx = to_vmx(vcpu);
4661  struct vmx_msr_entry g, h;
4662  gpa_t gpa;
4663  u32 i, j;
4664 
4665  vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4666 
4667  if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4668  /*
4669  * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4670  * as vmcs01.GUEST_DR7 contains a userspace defined value
4671  * and vcpu->arch.dr7 is not squirreled away before the
4672  * nested VMENTER (not worth adding a variable in nested_vmx).
4673  */
4674  if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4675  kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4676  else
4677  WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4678  }
4679 
4680  /*
4681  * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4682  * handle a variety of side effects to KVM's software model.
4683  */
 4684  vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 4685 
4686  vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4687  vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4688 
4689  vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4690  vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4691 
4693  vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4694  kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4695 
4696  /*
4697  * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4698  * from vmcs01 (if necessary). The PDPTRs are not loaded on
4699  * VMFail, like everything else we just need to ensure our
4700  * software model is up-to-date.
4701  */
4702  if (enable_ept && is_pae_paging(vcpu))
4703  ept_save_pdptrs(vcpu);
4704 
4705  kvm_mmu_reset_context(vcpu);
4706 
4707  /*
4708  * This nasty bit of open coding is a compromise between blindly
4709  * loading L1's MSRs using the exit load lists (incorrect emulation
4710  * of VMFail), leaving the nested VM's MSRs in the software model
4711  * (incorrect behavior) and snapshotting the modified MSRs (too
4712  * expensive since the lists are unbound by hardware). For each
4713  * MSR that was (prematurely) loaded from the nested VMEntry load
4714  * list, reload it from the exit load list if it exists and differs
4715  * from the guest value. The intent is to stuff host state as
4716  * silently as possible, not to fully process the exit load list.
4717  */
4718  for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4719  gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4720  if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4721  pr_debug_ratelimited(
4722  "%s read MSR index failed (%u, 0x%08llx)\n",
4723  __func__, i, gpa);
4724  goto vmabort;
4725  }
4726 
4727  for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4728  gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4729  if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4730  pr_debug_ratelimited(
4731  "%s read MSR failed (%u, 0x%08llx)\n",
4732  __func__, j, gpa);
4733  goto vmabort;
4734  }
4735  if (h.index != g.index)
4736  continue;
4737  if (h.value == g.value)
4738  break;
4739 
4740  if (nested_vmx_load_msr_check(vcpu, &h)) {
4741  pr_debug_ratelimited(
4742  "%s check failed (%u, 0x%x, 0x%x)\n",
4743  __func__, j, h.index, h.reserved);
4744  goto vmabort;
4745  }
4746 
4747  if (kvm_set_msr(vcpu, h.index, h.value)) {
4748  pr_debug_ratelimited(
4749  "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4750  __func__, j, h.index, h.value);
4751  goto vmabort;
4752  }
4753  }
4754  }
4755 
4756  return;
4757 
4758 vmabort:
4759  nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4760 }
static void kvm_register_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg)
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
Definition: mmu.c:5581
static u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
Definition: nested.c:4634
void ept_save_pdptrs(struct kvm_vcpu *vcpu)
Definition: vmx.c:3246
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_run()

static int nested_vmx_run ( struct kvm_vcpu *  vcpu,
bool  launch 
)
static

Definition at line 3592 of file nested.c.

3593 {
3594  struct vmcs12 *vmcs12;
3595  enum nvmx_vmentry_status status;
3596  struct vcpu_vmx *vmx = to_vmx(vcpu);
3597  u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
 3598  enum nested_evmptrld_status evmptrld_status;
 3599 
 3600  if (!nested_vmx_check_permission(vcpu))
3599 
3601  return 1;
3602 
3603  evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3604  if (evmptrld_status == EVMPTRLD_ERROR) {
3605  kvm_queue_exception(vcpu, UD_VECTOR);
3606  return 1;
3607  }
3608 
3609  kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
3610 
3611  if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
3612  return nested_vmx_failInvalid(vcpu);
3613 
3614  if (CC(!nested_vmx_is_evmptr12_valid(vmx) &&
3615  vmx->nested.current_vmptr == INVALID_GPA))
3616  return nested_vmx_failInvalid(vcpu);
3617 
3618  vmcs12 = get_vmcs12(vcpu);
3619 
3620  /*
3621  * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3622  * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3623  * rather than RFLAGS.ZF, and no error number is stored to the
3624  * VM-instruction error field.
3625  */
3626  if (CC(vmcs12->hdr.shadow_vmcs))
3627  return nested_vmx_failInvalid(vcpu);
3628 
3629  if (nested_vmx_is_evmptr12_valid(vmx)) {
3630  struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
3631 
3632  copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields);
3633  /* Enlightened VMCS doesn't have launch state */
3634  vmcs12->launch_state = !launch;
3635  } else if (enable_shadow_vmcs) {
3636  copy_shadow_to_vmcs12(vmx);
3637  }
3638 
3639  /*
3640  * The nested entry process starts with enforcing various prerequisites
3641  * on vmcs12 as required by the Intel SDM, and act appropriately when
3642  * they fail: As the SDM explains, some conditions should cause the
3643  * instruction to fail, while others will cause the instruction to seem
3644  * to succeed, but return an EXIT_REASON_INVALID_STATE.
3645  * To speed up the normal (success) code path, we should avoid checking
3646  * for misconfigurations which will anyway be caught by the processor
3647  * when using the merged vmcs02.
3648  */
3649  if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3650  return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3651 
3652  if (CC(vmcs12->launch_state == launch))
3653  return nested_vmx_fail(vcpu,
3654  launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3655  : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3656 
3657  if (nested_vmx_check_controls(vcpu, vmcs12))
3658  return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 3659 
 3660  if (nested_vmx_check_address_space_size(vcpu, vmcs12))
 3661  return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 3662 
 3663  if (nested_vmx_check_host_state(vcpu, vmcs12))
 3664  return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3665 
3666  /*
3667  * We're finally done with prerequisite checking, and can start with
3668  * the nested entry.
3669  */
3670  vmx->nested.nested_run_pending = 1;
 3671  vmx->nested.has_preemption_timer_deadline = false;
 3672  status = nested_vmx_enter_non_root_mode(vcpu, true);
3673  if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3674  goto vmentry_failed;
3675 
3676  /* Emulate processing of posted interrupts on VM-Enter. */
 3677  if (nested_cpu_has_posted_intr(vmcs12) &&
 3678  (kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv)) {
 3679  vmx->nested.pi_pending = true;
3680  kvm_make_request(KVM_REQ_EVENT, vcpu);
 3681  kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
 3682  }
3683 
3684  /* Hide L1D cache contents from the nested guest. */
3685  vmx->vcpu.arch.l1tf_flush_l1d = true;
3686 
3687  /*
3688  * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3689  * also be used as part of restoring nVMX state for
3690  * snapshot restore (migration).
3691  *
3692  * In this flow, it is assumed that vmcs12 cache was
3693  * transferred as part of captured nVMX state and should
3694  * therefore not be read from guest memory (which may not
3695  * exist on destination host yet).
3696  */
 3697  nested_cache_shadow_vmcs12(vcpu, vmcs12);
 3698 
3699  switch (vmcs12->guest_activity_state) {
3700  case GUEST_ACTIVITY_HLT:
3701  /*
3702  * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3703  * awakened by event injection or by an NMI-window VM-exit or
3704  * by an interrupt-window VM-exit, halt the vcpu.
3705  */
3706  if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3707  !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3708  !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3709  (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3710  vmx->nested.nested_run_pending = 0;
3711  return kvm_emulate_halt_noskip(vcpu);
3712  }
3713  break;
3714  case GUEST_ACTIVITY_WAIT_SIPI:
3715  vmx->nested.nested_run_pending = 0;
3716  vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3717  break;
3718  default:
3719  break;
3720  }
3721 
3722  return 1;
3723 
3724 vmentry_failed:
3725  vmx->nested.nested_run_pending = 0;
3726  if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3727  return 0;
3728  if (status == NVMX_VMENTRY_VMEXIT)
3729  return 1;
3730  WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3731  return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3732 }
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
Definition: lapic.c:2859
void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
Definition: lapic.c:738
nvmx_vmentry_status
Definition: nested.h:13
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
Definition: pmu.c:828
bool has_preemption_timer_deadline
Definition: vmx.h:212
bool pi_pending
Definition: vmx.h:207
u32 launch_state
Definition: vmcs12.h:34
u32 shadow_vmcs
Definition: vmcs.h:18
nested_evmptrld_status
Definition: hyperv.h:12
static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
Definition: nested.c:1604
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
Definition: nested.c:3427
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2946
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, bool from_launch)
Definition: nested.c:2034
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:701
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2919
static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:2935
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
Definition: vmx.c:1561
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
Definition: x86.c:9851
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_set_intercept_for_msr()

static void nested_vmx_set_intercept_for_msr ( struct vcpu_vmx vmx,
unsigned long *  msr_bitmap_l1,
unsigned long *  msr_bitmap_l0,
u32  msr,
int  types 
)
inline static

Definition at line 580 of file nested.c.

584 {
585  if (types & MSR_TYPE_R)
586  nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
587  msr_bitmap_l0, msr);
588  if (types & MSR_TYPE_W)
589  nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
590  msr_bitmap_l0, msr);
591 }
Here is the caller graph for this function:

◆ nested_vmx_set_vmcs_shadowing_bitmap()

void nested_vmx_set_vmcs_shadowing_bitmap ( void  )

Definition at line 6764 of file nested.c.

6765 {
6766  if (enable_shadow_vmcs) {
6767  vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6768  vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6769  }
6770 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_setup_basic()

static void nested_vmx_setup_basic ( struct nested_vmx_msrs msrs)
static

Definition at line 6997 of file nested.c.

6998 {
6999  /*
7000  * This MSR reports some information about VMX support. We
7001  * should return information about the VMX we emulate for the
7002  * guest, and the VMCS structure we give it - not about the
7003  * VMX support of the underlying hardware.
7004  */
7005  msrs->basic =
7006  VMCS12_REVISION |
7007  VMX_BASIC_TRUE_CTLS |
7008  ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
7009  (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
7010 
 7011  if (cpu_has_vmx_basic_inout())
 7012  msrs->basic |= VMX_BASIC_INOUT;
7013 }
static bool cpu_has_vmx_basic_inout(void)
Definition: capabilities.h:77
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_setup_cpubased_ctls()

static void nested_vmx_setup_cpubased_ctls ( struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6866 of file nested.c.

6868 {
6869  msrs->procbased_ctls_low =
6870  CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6871 
6872  msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl;
6873  msrs->procbased_ctls_high &=
6874  CPU_BASED_INTR_WINDOW_EXITING |
6875  CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6876  CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6877  CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6878  CPU_BASED_CR3_STORE_EXITING |
6879 #ifdef CONFIG_X86_64
6880  CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6881 #endif
6882  CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6883  CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6884  CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6885  CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6886  CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6887  /*
6888  * We can allow some features even when not supported by the
6889  * hardware. For example, L1 can specify an MSR bitmap - and we
6890  * can use it to avoid exits to L1 - even when L0 runs L2
6891  * without MSR bitmaps.
6892  */
6893  msrs->procbased_ctls_high |=
6894  CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6895  CPU_BASED_USE_MSR_BITMAPS;
6896 
6897  /* We support free control of CR3 access interception. */
6898  msrs->procbased_ctls_low &=
6899  ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6900 }
u32 cpu_based_exec_ctrl
Definition: capabilities.h:61
Here is the caller graph for this function:

◆ nested_vmx_setup_cr_fixed()

static void nested_vmx_setup_cr_fixed ( struct nested_vmx_msrs msrs)
static

Definition at line 7015 of file nested.c.

7016 {
7017  /*
7018  * These MSRs specify bits which the guest must keep fixed on
7019  * while L1 is in VMXON mode (in L1's root mode, or running an L2).
7020  * We picked the standard core2 setting.
7021  */
7022 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
7023 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
 7024  msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
 7025  msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
 7026 
7027  /* These MSRs specify bits which the guest must keep fixed off. */
7028  rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
7029  rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
7030 
7031  if (vmx_umip_emulated())
7032  msrs->cr4_fixed1 |= X86_CR4_UMIP;
7033 }
static bool vmx_umip_emulated(void)
Definition: capabilities.h:153
#define VMXON_CR0_ALWAYSON
#define VMXON_CR4_ALWAYSON
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_setup_ctls_msrs()

void nested_vmx_setup_ctls_msrs ( struct vmcs_config vmcs_conf,
u32  ept_caps 
)

Definition at line 7045 of file nested.c.

7046 {
7047  struct nested_vmx_msrs *msrs = &vmcs_conf->nested;
7048 
7049  /*
7050  * Note that as a general rule, the high half of the MSRs (bits in
7051  * the control fields which may be 1) should be initialized by the
7052  * intersection of the underlying hardware's MSR (i.e., features which
7053  * can be supported) and the list of features we want to expose -
7054  * because they are known to be properly supported in our code.
7055  * Also, usually, the low half of the MSRs (bits which must be 1) can
7056  * be set to 0, meaning that L1 may turn off any of these bits. The
7057  * reason is that if one of these bits is necessary, it will appear
7058  * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
7059  * fields of vmcs01 and vmcs02, will turn these bits off - and
7060  * nested_vmx_l1_wants_exit() will not pass related exits to L1.
7061  * These rules have exceptions below.
7062  */
7063  nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs);
7064 
7065  nested_vmx_setup_exit_ctls(vmcs_conf, msrs);
7066 
7067  nested_vmx_setup_entry_ctls(vmcs_conf, msrs);
7068 
7069  nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs);
7070 
7071  nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs);
7072 
7073  nested_vmx_setup_misc_data(vmcs_conf, msrs);
7074 
7075  nested_vmx_setup_basic(msrs);
7076 
 7077  nested_vmx_setup_cr_fixed(msrs);
 7078 
 7079  msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
 7080 }
struct nested_vmx_msrs nested
Definition: capabilities.h:67
static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6806
static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
Definition: nested.c:6997
static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6846
static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6866
static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6823
static u64 nested_vmx_calc_vmcs_enum_msr(void)
Definition: nested.c:6778
static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
Definition: nested.c:7015
static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6985
static void nested_vmx_setup_secondary_ctls(u32 ept_caps, struct vmcs_config *vmcs_conf, struct nested_vmx_msrs *msrs)
Definition: nested.c:6902
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_setup_entry_ctls()

static void nested_vmx_setup_entry_ctls ( struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6846 of file nested.c.

6848 {
6849  msrs->entry_ctls_low =
6850  VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6851 
6852  msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl;
6853  msrs->entry_ctls_high &=
6854 #ifdef CONFIG_X86_64
6855  VM_ENTRY_IA32E_MODE |
6856 #endif
6857  VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
6858  msrs->entry_ctls_high |=
6859  (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
6860  VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
6861 
6862  /* We support free control of debug control loading. */
6863  msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6864 }
u32 vmentry_ctrl
Definition: capabilities.h:65
Here is the caller graph for this function:

◆ nested_vmx_setup_exit_ctls()

static void nested_vmx_setup_exit_ctls ( struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6823 of file nested.c.

6825 {
6826  msrs->exit_ctls_low =
6827  VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6828 
6829  msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl;
6830  msrs->exit_ctls_high &=
6831 #ifdef CONFIG_X86_64
6832  VM_EXIT_HOST_ADDR_SPACE_SIZE |
6833 #endif
6834  VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6835  VM_EXIT_CLEAR_BNDCFGS;
6836  msrs->exit_ctls_high |=
6837  VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6838  VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6839  VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
6840  VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
6841 
6842  /* We support free control of debug control saving. */
6843  msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6844 }
Here is the caller graph for this function:

◆ nested_vmx_setup_misc_data()

static void nested_vmx_setup_misc_data ( struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6985 of file nested.c.

6987 {
6988  msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
6989  msrs->misc_low |=
6990  MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
 6991  VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
 6992  VMX_MISC_ACTIVITY_HLT |
6993  VMX_MISC_ACTIVITY_WAIT_SIPI;
6994  msrs->misc_high = 0;
6995 }
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
Definition: nested.c:39
Here is the caller graph for this function:

◆ nested_vmx_setup_pinbased_ctls()

static void nested_vmx_setup_pinbased_ctls ( struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6806 of file nested.c.

6808 {
6809  msrs->pinbased_ctls_low =
6810  PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6811 
6812  msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl;
6813  msrs->pinbased_ctls_high &=
6814  PIN_BASED_EXT_INTR_MASK |
6815  PIN_BASED_NMI_EXITING |
6816  PIN_BASED_VIRTUAL_NMIS |
6817  (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
6818  msrs->pinbased_ctls_high |=
6819  PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6820  PIN_BASED_VMX_PREEMPTION_TIMER;
6821 }
u32 pin_based_exec_ctrl
Definition: capabilities.h:60
bool __read_mostly enable_apicv
Definition: x86.c:235
Here is the caller graph for this function:

◆ nested_vmx_setup_secondary_ctls()

static void nested_vmx_setup_secondary_ctls ( u32  ept_caps,
struct vmcs_config vmcs_conf,
struct nested_vmx_msrs msrs 
)
static

Definition at line 6902 of file nested.c.

6905 {
6906  msrs->secondary_ctls_low = 0;
6907 
6908  msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl;
6909  msrs->secondary_ctls_high &=
6910  SECONDARY_EXEC_DESC |
6911  SECONDARY_EXEC_ENABLE_RDTSCP |
6912  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6913  SECONDARY_EXEC_WBINVD_EXITING |
6914  SECONDARY_EXEC_APIC_REGISTER_VIRT |
6915  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6916  SECONDARY_EXEC_RDRAND_EXITING |
6917  SECONDARY_EXEC_ENABLE_INVPCID |
6918  SECONDARY_EXEC_ENABLE_VMFUNC |
6919  SECONDARY_EXEC_RDSEED_EXITING |
6920  SECONDARY_EXEC_ENABLE_XSAVES |
6921  SECONDARY_EXEC_TSC_SCALING |
6922  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
6923 
6924  /*
6925  * We can emulate "VMCS shadowing," even if the hardware
6926  * doesn't support it.
6927  */
6928  msrs->secondary_ctls_high |=
6929  SECONDARY_EXEC_SHADOW_VMCS;
6930 
6931  if (enable_ept) {
6932  /* nested EPT: emulate EPT also to L1 */
6933  msrs->secondary_ctls_high |=
6934  SECONDARY_EXEC_ENABLE_EPT;
6935  msrs->ept_caps =
6936  VMX_EPT_PAGE_WALK_4_BIT |
6937  VMX_EPT_PAGE_WALK_5_BIT |
6938  VMX_EPTP_WB_BIT |
6939  VMX_EPT_INVEPT_BIT |
6940  VMX_EPT_EXECUTE_ONLY_BIT;
6941 
6942  msrs->ept_caps &= ept_caps;
6943  msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6944  VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6945  VMX_EPT_1GB_PAGE_BIT;
6946  if (enable_ept_ad_bits) {
6947  msrs->secondary_ctls_high |=
6948  SECONDARY_EXEC_ENABLE_PML;
6949  msrs->ept_caps |= VMX_EPT_AD_BIT;
6950  }
6951 
6952  /*
6953  * Advertise EPTP switching irrespective of hardware support,
6954  * KVM emulates it in software so long as VMFUNC is supported.
6955  */
6956  if (cpu_has_vmx_vmfunc())
6957  msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING;
6958  }
6959 
6960  /*
6961  * Old versions of KVM use the single-context version without
6962  * checking for support, so declare that it is supported even
6963  * though it is treated as global context. The alternative is
6964  * not failing the single-context invvpid, and it is worse.
6965  */
6966  if (enable_vpid) {
6967  msrs->secondary_ctls_high |=
6968  SECONDARY_EXEC_ENABLE_VPID;
6969  msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
 6970  VMX_VPID_EXTENT_SUPPORTED_MASK;
 6971  }
6972 
 6973  if (enable_unrestricted_guest)
 6974  msrs->secondary_ctls_high |=
6975  SECONDARY_EXEC_UNRESTRICTED_GUEST;
6976 
 6977  if (flexpriority_enabled)
 6978  msrs->secondary_ctls_high |=
6979  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6980 
6981  if (enable_sgx)
6982  msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
6983 }
static bool cpu_has_vmx_vmfunc(void)
Definition: capabilities.h:219
bool __read_mostly enable_ept_ad_bits
Definition: vmx.c:98
bool __read_mostly enable_vpid
Definition: vmx.c:82
bool __read_mostly enable_unrestricted_guest
Definition: vmx.c:94
bool __read_mostly flexpriority_enabled
Definition: vmx.c:88
bool __read_mostly enable_sgx
Definition: sgx.c:14
u32 cpu_based_2nd_exec_ctrl
Definition: capabilities.h:62
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_store_msr()

static int nested_vmx_store_msr ( struct kvm_vcpu *  vcpu,
u64  gpa,
u32  count 
)
static

Definition at line 1025 of file nested.c.

1026 {
1027  u64 data;
1028  u32 i;
1029  struct vmx_msr_entry e;
1030  u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
1031 
1032  for (i = 0; i < count; i++) {
1033  if (unlikely(i >= max_msr_list_size))
1034  return -EINVAL;
1035 
1036  if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1037  return -EINVAL;
1038 
1039  if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
1040  return -EINVAL;
1041 
1042  if (kvm_vcpu_write_guest(vcpu,
1043  gpa + i * sizeof(e) +
1044  offsetof(struct vmx_msr_entry, value),
1045  &data, sizeof(data))) {
1046  pr_debug_ratelimited(
1047  "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1048  __func__, i, e.index, data);
1049  return -EINVAL;
1050  }
1051  }
1052  return 0;
1053 }
static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data)
Definition: nested.c:974
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_store_msr_check()

static int nested_vmx_store_msr_check ( struct kvm_vcpu *  vcpu,
struct vmx_msr_entry *  e 
)
static

Definition at line 911 of file nested.c.

913 {
914  if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
 915  nested_vmx_msr_check_common(vcpu, e))
 916  return -EINVAL;
917  return 0;
918 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_succeed()

static int nested_vmx_succeed ( struct kvm_vcpu *  vcpu)
static

Definition at line 152 of file nested.c.

153 {
154  vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
155  & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
156  X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
157  return kvm_skip_emulated_instruction(vcpu);
158 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_transition_tlb_flush()

static void nested_vmx_transition_tlb_flush ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
bool  is_vmenter 
)
static

Definition at line 1167 of file nested.c.

1170 {
1171  struct vcpu_vmx *vmx = to_vmx(vcpu);
1172 
1173  /* Handle pending Hyper-V TLB flush requests */
 1174  kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
 1175 
1176  /*
1177  * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1178  * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
1179  * full TLB flush from the guest's perspective. This is required even
1180  * if VPID is disabled in the host as KVM may need to synchronize the
1181  * MMU in response to the guest TLB flush.
1182  *
1183  * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1184  * EPT is a special snowflake, as guest-physical mappings aren't
1185  * flushed on VPID invalidations, including VM-Enter or VM-Exit with
1186  * VPID disabled. As a result, KVM _never_ needs to sync nEPT
1187  * entries on VM-Enter because L1 can't rely on VM-Enter to flush
1188  * those mappings.
1189  */
1190  if (!nested_cpu_has_vpid(vmcs12)) {
1191  kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1192  return;
1193  }
1194 
1195  /* L2 should never have a VPID if VPID is disabled. */
1196  WARN_ON(!enable_vpid);
1197 
1198  /*
1199  * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1200  * emulate a guest TLB flush as KVM does not track vpid12 history nor
1201  * is the VPID incorporated into the MMU context. I.e. KVM must assume
1202  * that the new vpid12 has never been used and thus represents a new
1203  * guest ASID that cannot have entries in the TLB.
1204  */
1205  if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
 1206  vmx->nested.last_vpid = vmcs12->virtual_processor_id;
 1207  kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1208  return;
1209  }
1210 
1211  /*
1212  * If VPID is enabled, used by vmc12, and vpid12 is not changing but
1213  * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
1214  * KVM was unable to allocate a VPID for L2, flush the current context
1215  * as the effective ASID is common to both L1 and L2.
1216  */
 1217  if (!nested_has_guest_tlb_tag(vcpu))
 1218  kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1219 }
static void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled)
Definition: hyperv.h:324
u16 last_vpid
Definition: vmx.h:232
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
Definition: nested.c:1159
Here is the call graph for this function:
Here is the caller graph for this function:
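The flush policy above can be summarized as a small decision tree over the vmcs12 VPID configuration. The standalone sketch below mirrors those conditions; the enum values stand in for KVM_REQ_TLB_FLUSH_GUEST and KVM_REQ_TLB_FLUSH_CURRENT, and the inputs for the helpers used in the real code:

#include <stdbool.h>

enum toy_flush { TOY_FLUSH_GUEST, TOY_FLUSH_CURRENT, TOY_FLUSH_NONE };

static enum toy_flush toy_nested_transition_flush(bool vmcs12_has_vpid,
						  bool is_vmenter,
						  bool vpid12_changed,
						  bool has_guest_tlb_tag)
{
	if (!vmcs12_has_vpid)
		return TOY_FLUSH_GUEST;		/* no VPID: flush everything L1 sees */
	if (is_vmenter && vpid12_changed)
		return TOY_FLUSH_GUEST;		/* new vpid12, treat as a new guest ASID */
	if (!has_guest_tlb_tag)
		return TOY_FLUSH_CURRENT;	/* L1 and L2 share the ASID */
	return TOY_FLUSH_NONE;
}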

◆ nested_vmx_triple_fault()

static void nested_vmx_triple_fault ( struct kvm_vcpu *  vcpu)
static

Definition at line 4951 of file nested.c.

4952 {
4953  kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4954  nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4955 }
Here is the call graph for this function:

◆ nested_vmx_update_pending_dbg()

static void nested_vmx_update_pending_dbg ( struct kvm_vcpu *  vcpu)
static

Definition at line 3994 of file nested.c.

3995 {
3996  unsigned long pending_dbg;
3997 
3998  pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception);
3999  if (pending_dbg)
4000  vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg);
4001 }
static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex)
Definition: nested.c:3967
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_vmexit()

void nested_vmx_vmexit ( struct kvm_vcpu *  vcpu,
u32  vm_exit_reason,
u32  exit_intr_info,
unsigned long  exit_qualification 
)

Definition at line 4767 of file nested.c.

4769 {
4770  struct vcpu_vmx *vmx = to_vmx(vcpu);
4771  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4772 
4773  /* Pending MTF traps are discarded on VM-Exit. */
4774  vmx->nested.mtf_pending = false;
4775 
4776  /* trying to cancel vmlaunch/vmresume is a bug */
4777  WARN_ON_ONCE(vmx->nested.nested_run_pending);
4778 
4779 #ifdef CONFIG_KVM_HYPERV
4780  if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4781  /*
4782  * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4783  * Enlightened VMCS after migration and we still need to
4784  * do that when something is forcing L2->L1 exit prior to
4785  * the first L2 run.
4786  */
4787  (void)nested_get_evmcs_page(vcpu);
4788  }
4789 #endif
4790 
4791  /* Service pending TLB flush requests for L2 before switching to L1. */
4793 
4794  /*
4795  * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4796  * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4797  * up-to-date before switching to L1.
4798  */
4799  if (enable_ept && is_pae_paging(vcpu))
4800  vmx_ept_load_pdptrs(vcpu);
4801 
4802  leave_guest_mode(vcpu);
4803 
4805  hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4806 
4807  if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4808  vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4809  if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4810  vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4811  }
4812 
4813  if (likely(!vmx->fail)) {
4815 
4816  if (vm_exit_reason != -1)
4818  exit_intr_info, exit_qualification);
4819 
4820  /*
4821  * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4822  * also be used to capture vmcs12 cache as part of
4823  * capturing nVMX state for snapshot (migration).
4824  *
4825  * Otherwise, this flush will dirty guest memory at a
4826  * point it is already assumed by user-space to be
4827  * immutable.
4828  */
4830  } else {
4831  /*
4832  * The only expected VM-instruction error is "VM entry with
4833  * invalid control field(s)." Anything else indicates a
4834  * problem with L0. And we should never get here with a
4835  * VMFail of any type if early consistency checks are enabled.
4836  */
4837  WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4838  VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4839  WARN_ON_ONCE(nested_early_check);
4840  }
4841 
4842  /*
4843  * Drop events/exceptions that were queued for re-injection to L2
4844  * (picked up via vmx_complete_interrupts()), as well as exceptions
4845  * that were pending for L2. Note, this must NOT be hoisted above
4846  * prepare_vmcs12(), events/exceptions queued for re-injection need to
4847  * be captured in vmcs12 (see vmcs12_save_pending_event()).
4848  */
4849  vcpu->arch.nmi_injected = false;
4852 
4853  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4854 
4855  /*
4856  * If IBRS is advertised to the vCPU, KVM must flush the indirect
4857  * branch predictors when transitioning from L2 to L1, as L1 expects
4858  * hardware (KVM in this case) to provide separate predictor modes.
4859  * Bare metal isolates VMX root (host) from VMX non-root (guest), but
4860  * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
4861  * separate modes for L2 vs L1.
4862  */
4863  if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4864  indirect_branch_prediction_barrier();
4865 
4866  /* Update any VMCS fields that might have changed while L2 ran */
4867  vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4868  vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4869  vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4871  vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4872 
4873  if (vmx->nested.l1_tpr_threshold != -1)
4874  vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4875 
4879  }
4880 
4884  }
4885 
4886  /* Unpin physical memory we referred to in vmcs02 */
4887  kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
4888  kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4889  kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4890  vmx->nested.pi_desc = NULL;
4891 
4894  kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4895  }
4896 
4898  vmx->nested.update_vmcs01_apicv_status = false;
4899  kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4900  }
4901 
4902  if ((vm_exit_reason != -1) &&
4904  vmx->nested.need_vmcs12_to_shadow_sync = true;
4905 
4906  /* in case we halted in L2 */
4907  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4908 
4909  if (likely(!vmx->fail)) {
4910  if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4911  nested_exit_intr_ack_set(vcpu)) {
4912  int irq = kvm_cpu_get_interrupt(vcpu);
4913  WARN_ON(irq < 0);
4914  vmcs12->vm_exit_intr_info = irq |
4915  INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4916  }
4917 
4918  if (vm_exit_reason != -1)
4919  trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4924  KVM_ISA_VMX);
4925 
4927 
4928  return;
4929  }
4930 
4931  /*
4932  * After an early L2 VM-entry failure, we're now back
4933  * in L1 which thinks it just finished a VMLAUNCH or
4934  * VMRESUME instruction, so we need to set the failure
4935  * flag and the VM-instruction error field of the VMCS
4936  * accordingly, and skip the emulated instruction.
4937  */
4938  (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4939 
4940  /*
4941  * Restore L1's host state to KVM's software model. We're here
4942  * because a consistency check was caught by hardware, which
4943  * means some amount of guest state has been propagated to KVM's
4944  * model and needs to be unwound to the host's state.
4945  */
4947 
4948  vmx->fail = 0;
4949 }
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
Definition: irq.c:138
Definition: x86.h:12
bool has_tsc_control
Definition: x86.h:14
bool reload_vmcs01_apic_access_page
Definition: vmx.h:177
int l1_tpr_threshold
Definition: vmx.h:229
bool mtf_pending
Definition: vmx.h:192
bool update_vmcs01_apicv_status
Definition: vmx.h:179
bool update_vmcs01_cpu_dirty_logging
Definition: vmx.h:178
bool change_vmcs01_virtual_apic_mode
Definition: vmx.h:176
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
Definition: nested.c:4657
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:720
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:4372
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
Definition: nested.c:4453
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
Definition: vmx.c:6694
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
Definition: vmx.c:3231
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
Definition: vmx.c:8110
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
Definition: x86.h:122
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
Definition: x86.h:107
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmx_write_pml_buffer()

static int nested_vmx_write_pml_buffer ( struct kvm_vcpu *  vcpu,
gpa_t  gpa 
)
static

Definition at line 3347 of file nested.c.

3348 {
3349  struct vmcs12 *vmcs12;
3350  struct vcpu_vmx *vmx = to_vmx(vcpu);
3351  gpa_t dst;
3352 
3353  if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3354  return 0;
3355 
3356  if (WARN_ON_ONCE(vmx->nested.pml_full))
3357  return 1;
3358 
3359  /*
3360  * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3361  * set is already checked as part of A/D emulation.
3362  */
3363  vmcs12 = get_vmcs12(vcpu);
3364  if (!nested_cpu_has_pml(vmcs12))
3365  return 0;
3366 
3367  if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3368  vmx->nested.pml_full = true;
3369  return 1;
3370  }
3371 
3372  gpa &= ~0xFFFull;
3373  dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3374 
3375  if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3376  offset_in_page(dst), sizeof(gpa)))
3377  return 0;
3378 
3379  vmcs12->guest_pml_index--;
3380 
3381  return 0;
3382 }
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len)
Definition: kvm_main.c:3431
u16 guest_pml_index
Definition: vmcs12.h:187
#define PML_ENTITY_NUM
Definition: vmx.h:338
Here is the call graph for this function:
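
The guest-physical destination is simply the next free slot of L1's 512-entry PML page, with the index counting down from the top. A standalone sketch of the address math (the function and parameter names are illustrative; PML_ENTITY_NUM is 512 as in KVM):

#include <stdint.h>
#include <stdbool.h>

#define PML_ENTITY_NUM 512 /* one 4 KiB page of 8-byte entries */

/*
 * Compute where the next dirty-GPA record lands in L1's PML buffer and
 * advance the index, mimicking nested_vmx_write_pml_buffer(). Returns
 * false when the buffer is full, in which case the caller would flag a
 * PML-full event for L1 instead of writing.
 */
static bool pml_next_slot(uint64_t pml_address, uint16_t *guest_pml_index,
                          uint64_t dirty_gpa, uint64_t *slot_gpa,
                          uint64_t *record)
{
        if (*guest_pml_index >= PML_ENTITY_NUM)
                return false;

        *record   = dirty_gpa & ~0xFFFull;               /* page-aligned GPA  */
        *slot_gpa = pml_address + 8ull * *guest_pml_index;
        (*guest_pml_index)--;                            /* index counts down */
        return true;
}
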

◆ prepare_vmcs02()

static int prepare_vmcs02 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
bool  from_vmentry,
enum vm_entry_failure_code *  entry_failure_code 
)
static

Definition at line 2570 of file nested.c.

2573 {
2574  struct vcpu_vmx *vmx = to_vmx(vcpu);
2575  struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
2576  bool load_guest_pdptrs_vmcs12 = false;
2577 
2580  vmx->nested.dirty_vmcs12 = false;
2581 
2582  load_guest_pdptrs_vmcs12 = !nested_vmx_is_evmptr12_valid(vmx) ||
2583  !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2584  }
2585 
2586  if (vmx->nested.nested_run_pending &&
2587  (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2588  kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2589  vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2590  } else {
2591  kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2592  vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
2593  }
2594  if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2595  !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2596  vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2598 
2599  /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2600  * bitwise-or of what L1 wants to trap for L2, and what we want to
2601  * trap. Note that CR0.TS also needs updating - we do this later.
2602  */
2604  vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2605  vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2606 
2607  if (vmx->nested.nested_run_pending &&
2608  (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2609  vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2610  vcpu->arch.pat = vmcs12->guest_ia32_pat;
2611  } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2612  vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2613  }
2614 
2615  vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2616  vcpu->arch.l1_tsc_offset,
2617  vmx_get_l2_tsc_offset(vcpu),
2619 
2620  vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2621  vcpu->arch.l1_tsc_scaling_ratio,
2623 
2624  vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2626  vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2627 
2629 
2632 
2633  /*
2634  * Override the CR0/CR4 read shadows after setting the effective guest
2635  * CR0/CR4. The common helpers also set the shadows, but they don't
2636  * account for vmcs12's cr0/4_guest_host_mask.
2637  */
2638  vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2639  vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2640 
2641  vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2642  vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2643 
2644  vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2645  /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2646  vmx_set_efer(vcpu, vcpu->arch.efer);
2647 
2648  /*
2649  * Guest state is invalid and unrestricted guest is disabled,
2650  * which means L1 attempted VMEntry to L2 with invalid state.
2651  * Fail the VMEntry.
2652  *
2653  * However when force loading the guest state (SMM exit or
2654  * loading nested state after migration), it is possible to
2655  * have invalid guest state now, which will be later fixed by
2656  * restoring L2 register state
2657  */
2658  if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2659  *entry_failure_code = ENTRY_FAIL_DEFAULT;
2660  return -EINVAL;
2661  }
2662 
2663  /* Shadow page tables on either EPT or shadow page tables. */
2664  if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2665  from_vmentry, entry_failure_code))
2666  return -EINVAL;
2667 
2668  /*
2669  * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2670  * on nested VM-Exit, which can occur without actually running L2 and
2671  * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2672  * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2673  * transition to HLT instead of running L2.
2674  */
2675  if (enable_ept)
2676  vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2677 
2678  /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2679  if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2680  is_pae_paging(vcpu)) {
2681  vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2682  vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2683  vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2684  vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2685  }
2686 
2687  if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2689  WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2691  *entry_failure_code = ENTRY_FAIL_DEFAULT;
2692  return -EINVAL;
2693  }
2694 
2695  kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2696  kvm_rip_write(vcpu, vmcs12->guest_rip);
2697 
2698  /*
2699  * It was observed that genuine Hyper-V running in L1 doesn't reset
2700  * 'hv_clean_fields' by itself, it only sets the corresponding dirty
2701  * bits when it changes a field in eVMCS. Mark all fields as clean
2702  * here.
2703  */
2705  evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2706 
2707  return 0;
2708 }
static unsigned long nested_read_cr4(struct vmcs12 *fields)
Definition: nested.h:93
static unsigned long nested_read_cr0(struct vmcs12 *fields)
Definition: nested.h:88
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
Definition: nested.c:451
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
Definition: nested.c:2189
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
Definition: nested.c:2441
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
Definition: vmx.c:1897
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
Definition: vmx.c:874
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
Definition: vmx.c:1907
static bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
Definition: vmx.h:735
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
Definition: x86.c:2605
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
Definition: x86.c:2590
Here is the call graph for this function:

◆ prepare_vmcs02_constant_state()

static void prepare_vmcs02_constant_state ( struct vcpu_vmx vmx)
static

Definition at line 2200 of file nested.c.

2201 {
2202  struct kvm *kvm = vmx->vcpu.kvm;
2203 
2204  /*
2205  * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2206  * according to L0's settings (vmcs12 is irrelevant here). Host
2207  * fields that come from L0 and are not constant, e.g. HOST_CR3,
2208  * will be set as needed prior to VMLAUNCH/VMRESUME.
2209  */
2210  if (vmx->nested.vmcs02_initialized)
2211  return;
2212  vmx->nested.vmcs02_initialized = true;
2213 
2214  /*
2215  * We don't care what the EPTP value is; we just need to guarantee
2216  * it's valid so we don't get a false positive when doing early
2217  * consistency checks.
2218  */
2219  if (enable_ept)
2220  vmcs_write64(EPT_POINTER,
2221  construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2222 
2223  /* All VMFUNCs are currently emulated through L0 vmexits. */
2224  if (cpu_has_vmx_vmfunc())
2225  vmcs_write64(VM_FUNCTION_CONTROL, 0);
2226 
2227  if (cpu_has_vmx_posted_intr())
2228  vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2229 
2230  if (cpu_has_vmx_msr_bitmap())
2231  vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2232 
2233  /*
2234  * PML is emulated for L2, but never enabled in hardware as the MMU
2235  * handles A/D emulation. Disabling PML for L2 also avoids having to
2236  * deal with filtering out L2 GPAs from the buffer.
2237  */
2238  if (enable_pml) {
2239  vmcs_write64(PML_ADDRESS, 0);
2240  vmcs_write16(GUEST_PML_INDEX, -1);
2241  }
2242 
2243  if (cpu_has_vmx_encls_vmexit())
2244  vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA);
2245 
2246  if (kvm_notify_vmexit_enabled(kvm))
2247  vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
2248 
2249  /*
2250  * Set the MSR load/store lists to match L0's settings. Only the
2251  * addresses are constant (for vmcs02), the counts can change based
2252  * on L2's behavior, e.g. switching to/from long mode.
2253  */
2254  vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2255  vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2256  vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2257 
2258  vmx_set_constant_host_state(vmx);
2259 }
bool __read_mostly enable_pml
Definition: vmx.c:120
static bool cpu_has_vmx_encls_vmexit(void)
Definition: capabilities.h:235
static bool cpu_has_vmx_posted_intr(void)
Definition: capabilities.h:94
#define PT64_ROOT_4LEVEL
Definition: mmu.h:35
void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
Definition: vmx.c:4296
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
Definition: vmx.c:3371
static __always_inline void vmcs_write16(unsigned long field, u16 value)
Definition: vmx_ops.h:228
static bool kvm_notify_vmexit_enabled(struct kvm *kvm)
Definition: x86.h:429
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prepare_vmcs02_early()

static void prepare_vmcs02_early ( struct vcpu_vmx vmx,
struct loaded_vmcs vmcs01,
struct vmcs12 vmcs12 
)
static

Definition at line 2276 of file nested.c.

2278 {
2279  u32 exec_control;
2280  u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2281 
2284 
2285  /*
2286  * PIN CONTROLS
2287  */
2288  exec_control = __pin_controls_get(vmcs01);
2289  exec_control |= (vmcs12->pin_based_vm_exec_control &
2290  ~PIN_BASED_VMX_PREEMPTION_TIMER);
2291 
2292  /* Posted interrupts setting is only taken from vmcs12. */
2293  vmx->nested.pi_pending = false;
2296  else
2297  exec_control &= ~PIN_BASED_POSTED_INTR;
2298  pin_controls_set(vmx, exec_control);
2299 
2300  /*
2301  * EXEC CONTROLS
2302  */
2303  exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2304  exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2305  exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2306  exec_control &= ~CPU_BASED_TPR_SHADOW;
2307  exec_control |= vmcs12->cpu_based_vm_exec_control;
2308 
2309  vmx->nested.l1_tpr_threshold = -1;
2310  if (exec_control & CPU_BASED_TPR_SHADOW)
2311  vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2312 #ifdef CONFIG_X86_64
2313  else
2314  exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2315  CPU_BASED_CR8_STORE_EXITING;
2316 #endif
2317 
2318  /*
2319  * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2320  * for I/O port accesses.
2321  */
2322  exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2323  exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2324 
2325  /*
2326  * This bit will be computed in nested_get_vmcs12_pages, because
2327  * we do not have access to L1's MSR bitmap yet. For now, keep
2328  * the same bit as before, hoping to avoid multiple VMWRITEs that
2329  * only set/clear this bit.
2330  */
2331  exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2332  exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2333 
2334  exec_controls_set(vmx, exec_control);
2335 
2336  /*
2337  * SECONDARY EXEC CONTROLS
2338  */
2340  exec_control = __secondary_exec_controls_get(vmcs01);
2341 
2342  /* Take the following fields only from vmcs12 */
2343  exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2344  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2345  SECONDARY_EXEC_ENABLE_INVPCID |
2346  SECONDARY_EXEC_ENABLE_RDTSCP |
2347  SECONDARY_EXEC_ENABLE_XSAVES |
2348  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2349  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2350  SECONDARY_EXEC_APIC_REGISTER_VIRT |
2351  SECONDARY_EXEC_ENABLE_VMFUNC |
2352  SECONDARY_EXEC_DESC);
2353 
2354  if (nested_cpu_has(vmcs12,
2355  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2356  exec_control |= vmcs12->secondary_vm_exec_control;
2357 
2358  /* PML is emulated and never enabled in hardware for L2. */
2359  exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
2360 
2361  /* VMCS shadowing for L2 is emulated for now */
2362  exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2363 
2364  /*
2365  * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2366  * will not have to rewrite the controls just for this bit.
2367  */
2368  if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP))
2369  exec_control |= SECONDARY_EXEC_DESC;
2370 
2371  if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2372  vmcs_write16(GUEST_INTR_STATUS,
2374 
2375  if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2376  exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2377 
2378  if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2380 
2381  secondary_exec_controls_set(vmx, exec_control);
2382  }
2383 
2384  /*
2385  * ENTRY CONTROLS
2386  *
2387  * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2388  * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2389  * on the related bits (if supported by the CPU) in the hope that
2390  * we can avoid VMWrites during vmx_set_efer().
2391  *
2392  * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
2393  * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
2394  * do the same for L2.
2395  */
2396  exec_control = __vm_entry_controls_get(vmcs01);
2397  exec_control |= (vmcs12->vm_entry_controls &
2398  ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
2399  exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2400  if (cpu_has_load_ia32_efer()) {
2401  if (guest_efer & EFER_LMA)
2402  exec_control |= VM_ENTRY_IA32E_MODE;
2403  if (guest_efer != host_efer)
2404  exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2405  }
2406  vm_entry_controls_set(vmx, exec_control);
2407 
2408  /*
2409  * EXIT CONTROLS
2410  *
2411  * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2412  * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2413  * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2414  */
2415  exec_control = __vm_exit_controls_get(vmcs01);
2416  if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2417  exec_control |= VM_EXIT_LOAD_IA32_EFER;
2418  else
2419  exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2420  vm_exit_controls_set(vmx, exec_control);
2421 
2422  /*
2423  * Interrupt/Exception Fields
2424  */
2425  if (vmx->nested.nested_run_pending) {
2426  vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2428  vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2430  vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2432  vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2435  !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2436  } else {
2437  vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2438  }
2439 }
static bool cpu_has_secondary_exec_ctrls(void)
Definition: capabilities.h:129
void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: sgx.c:468
bool nmi_known_unmasked
Definition: vmcs.h:66
static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
Definition: nested.c:2261
Here is the call graph for this function:
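
Most vmcs02 control words above are built the same way: start from what L0 runs with in vmcs01, OR in what L1 asked for in vmcs12, then force the handful of bits KVM must own regardless of either. A compact, illustrative sketch of that merge (the bit names and values are placeholders, not the architectural encodings):

#include <stdint.h>

#define CTRL_IO_EXITING   (1u << 24)  /* must be on: L0 handles all I/O      */
#define CTRL_MSR_BITMAPS  (1u << 28)  /* recomputed later, keep current value */

/*
 * Sketch of the control merge done in prepare_vmcs02_early() for the
 * primary processor-based controls: L0's desires | L1's desires, with
 * individually overridden bits.
 */
static uint32_t merge_exec_controls(uint32_t vmcs01_ctls, uint32_t vmcs12_ctls,
                                    uint32_t current_vmcs02_ctls)
{
        uint32_t ctls = vmcs01_ctls | vmcs12_ctls;

        ctls |= CTRL_IO_EXITING;                        /* always exit on I/O  */
        ctls &= ~CTRL_MSR_BITMAPS;                      /* decided elsewhere   */
        ctls |= current_vmcs02_ctls & CTRL_MSR_BITMAPS; /* avoid a VMWRITE     */
        return ctls;
}
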

◆ prepare_vmcs02_early_rare()

static void prepare_vmcs02_early_rare ( struct vcpu_vmx vmx,
struct vmcs12 vmcs12 
)
static

Definition at line 2261 of file nested.c.

2263 {
2265 
2266  vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
2267 
2268  if (enable_vpid) {
2269  if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2270  vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2271  else
2272  vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2273  }
2274 }
int vpid
Definition: vmx.h:317
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
Definition: nested.c:2200
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prepare_vmcs02_rare()

static void prepare_vmcs02_rare ( struct vcpu_vmx vmx,
struct vmcs12 vmcs12 
)
static

Definition at line 2441 of file nested.c.

2442 {
2443  struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx);
2444 
2445  if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2446  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2447  vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2448  vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2449  vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2450  vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2451  vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2452  vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2453  vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2454  vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2455  vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2456  vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2457  vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2458  vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2459  vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2460  vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2461  vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2462  vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2463  vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2464  vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2465  vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2466  vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2467  vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2468  vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2469  vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2470  vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2471  vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2472  vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2473  vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2474  vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2475  vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2476  vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2477  vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2478  vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2479  vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2480  vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2481  vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2482  vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2483 
2484  vmx->segment_cache.bitmask = 0;
2485  }
2486 
2487  if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2488  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2489  vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2490  vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2492  vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2493  vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2494 
2495  /*
2496  * L1 may access the L2's PDPTR, so save them to construct
2497  * vmcs12
2498  */
2499  if (enable_ept) {
2500  vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2501  vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2502  vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2503  vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2504  }
2505 
2507  (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2508  vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2509  }
2510 
2512  vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2513 
2514  /*
2515  * Whether page-faults are trapped is determined by a combination of
2516  * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2517  * doesn't care about page faults then we should set all of these to
2518  * L1's desires. However, if L0 does care about (some) page faults, it
2519  * is not easy (if at all possible?) to merge L0 and L1's desires, we
2520  * simply ask to exit on each and every L2 page fault. This is done by
2521  * setting MASK=MATCH=0 and (see below) EB.PF=1.
2522  * Note that below we don't need special code to set EB.PF beyond the
2523  * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2524  * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2525  * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2526  */
2527  if (vmx_need_pf_intercept(&vmx->vcpu)) {
2528  /*
2529  * TODO: if both L0 and L1 need the same MASK and MATCH,
2530  * go ahead and use it?
2531  */
2532  vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2533  vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2534  } else {
2535  vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2536  vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2537  }
2538 
2539  if (cpu_has_vmx_apicv()) {
2540  vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2541  vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2542  vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2543  vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2544  }
2545 
2546  /*
2547  * Make sure the msr_autostore list is up to date before we set the
2548  * count in the vmcs02.
2549  */
2550  prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2551 
2552  vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2553  vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2554  vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2555 
2557 }
static bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
Definition: nested.h:170
struct vcpu_vmx::@41 segment_cache
u32 bitmask
Definition: vmx.h:309
u64 eoi_exit_bitmap0
Definition: vmcs12.h:48
u64 eoi_exit_bitmap2
Definition: vmcs12.h:50
u64 eoi_exit_bitmap1
Definition: vmcs12.h:49
u64 eoi_exit_bitmap3
Definition: vmcs12.h:51
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, u32 msr_index)
Definition: nested.c:1073
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
Definition: vmx.c:4363
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prepare_vmcs12()

static void prepare_vmcs12 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
u32  vm_exit_reason,
u32  exit_intr_info,
unsigned long  exit_qualification 
)
static

Definition at line 4453 of file nested.c.

4456 {
4457  /* update exit information fields: */
4458  vmcs12->vm_exit_reason = vm_exit_reason;
4459  if (to_vmx(vcpu)->exit_reason.enclave_mode)
4460  vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4461  vmcs12->exit_qualification = exit_qualification;
4462 
4463  /*
4464  * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
4465  * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
4466  * exit info fields are unmodified.
4467  */
4468  if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4469  vmcs12->launch_state = 1;
4470 
4471  /* vm_entry_intr_info_field is cleared on exit. Emulate this
4472  * instead of reading the real value. */
4473  vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4474 
4475  /*
4476  * Transfer the event that L0 or L1 may have wanted to inject into
4477  * L2 to IDT_VECTORING_INFO_FIELD.
4478  */
4480  vm_exit_reason, exit_intr_info);
4481 
4482  vmcs12->vm_exit_intr_info = exit_intr_info;
4483  vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4484  vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4485 
4486  /*
4487  * According to spec, there's no need to store the guest's
4488  * MSRs if the exit is due to a VM-entry failure that occurs
4489  * during or after loading the guest state. Since this exit
4490  * does not fall in that category, we need to save the MSRs.
4491  */
4492  if (nested_vmx_store_msr(vcpu,
4495  nested_vmx_abort(vcpu,
4496  VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4497  }
4498 }
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
Definition: nested.c:1025
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 vm_exit_reason, u32 exit_intr_info)
Definition: nested.c:3771
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prepare_vmx_msr_autostore_list()

static void prepare_vmx_msr_autostore_list ( struct kvm_vcpu *  vcpu,
u32  msr_index 
)
static

Definition at line 1073 of file nested.c.

1075 {
1076  struct vcpu_vmx *vmx = to_vmx(vcpu);
1077  struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1078  bool in_vmcs12_store_list;
1079  int msr_autostore_slot;
1080  bool in_autostore_list;
1081  int last;
1082 
1083  msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1084  in_autostore_list = msr_autostore_slot >= 0;
1085  in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1086 
1087  if (in_vmcs12_store_list && !in_autostore_list) {
1088  if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
1089  /*
1090  * Emulated VMEntry does not fail here. Instead a less
1091  * accurate value will be returned by
1092  * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1093  * instead of reading the value from the vmcs02 VMExit
1094  * MSR-store area.
1095  */
1096  pr_warn_ratelimited(
1097  "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1098  msr_index);
1099  return;
1100  }
1101  last = autostore->nr++;
1102  autostore->val[last].index = msr_index;
1103  } else if (!in_vmcs12_store_list && in_autostore_list) {
1104  last = --autostore->nr;
1105  autostore->val[msr_autostore_slot] = autostore->val[last];
1106  }
1107 }
Definition: vmx.h:33
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
Definition: nested.c:1055
#define MAX_NR_LOADSTORE_MSRS
Definition: vmx.h:31
Here is the call graph for this function:
Here is the caller graph for this function:
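
The list maintenance above is a classic unordered-array pattern: append when an MSR should be auto-stored, swap-with-last when it should not. A self-contained sketch of that pattern (struct and function names are hypothetical; MAX_LOADSTORE stands in for MAX_NR_LOADSTORE_MSRS):

#include <stdint.h>
#include <stdbool.h>

#define MAX_LOADSTORE 8

struct msr_slot { uint32_t index; uint64_t value; };
struct msr_list { int nr; struct msr_slot val[MAX_LOADSTORE]; };

static int find_slot(const struct msr_list *l, uint32_t msr)
{
        for (int i = 0; i < l->nr; i++)
                if (l->val[i].index == msr)
                        return i;
        return -1;
}

/*
 * Keep the autostore list in sync with whether vmcs12 wants 'msr'
 * stored on VM-Exit, mirroring prepare_vmx_msr_autostore_list().
 * Returns false when the list is full (KVM then falls back to a less
 * precise value, as the comment in the listing explains).
 */
static bool sync_autostore(struct msr_list *autostore, uint32_t msr,
                           bool wanted_by_vmcs12)
{
        int slot = find_slot(autostore, msr);

        if (wanted_by_vmcs12 && slot < 0) {
                if (autostore->nr == MAX_LOADSTORE)
                        return false;
                autostore->val[autostore->nr++].index = msr;
        } else if (!wanted_by_vmcs12 && slot >= 0) {
                autostore->val[slot] = autostore->val[--autostore->nr];
        }
        return true;
}
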

◆ read_and_check_msr_entry()

static bool read_and_check_msr_entry ( struct kvm_vcpu *  vcpu,
u64  gpa,
int  i,
struct vmx_msr_entry *  e 
)
static

Definition at line 1005 of file nested.c.

1007 {
1008  if (kvm_vcpu_read_guest(vcpu,
1009  gpa + i * sizeof(*e),
1010  e, 2 * sizeof(u32))) {
1011  pr_debug_ratelimited(
1012  "%s cannot read MSR entry (%u, 0x%08llx)\n",
1013  __func__, i, gpa + i * sizeof(*e));
1014  return false;
1015  }
1016  if (nested_vmx_store_msr_check(vcpu, e)) {
1017  pr_debug_ratelimited(
1018  "%s check failed (%u, 0x%x, 0x%x)\n",
1019  __func__, i, e->index, e->reserved);
1020  return false;
1021  }
1022  return true;
1023 }
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e)
Definition: nested.c:911
Here is the call graph for this function:
Here is the caller graph for this function:
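
Note that only 2 * sizeof(u32) bytes are read: the index and reserved fields of the 16-byte entry, since for a store-list entry the value half is written by KVM on the emulated VM-Exit rather than read. A sketch of the guest-memory layout being assumed (the struct name here is illustrative):

#include <stdint.h>

/*
 * Layout of one VM-exit MSR-store area entry as read by
 * read_and_check_msr_entry(): only the first 8 bytes are fetched.
 */
struct vmx_msr_entry_layout {
        uint32_t index;     /* MSR number                           */
        uint32_t reserved;  /* must be zero                         */
        uint64_t value;     /* filled in by KVM on emulated VM-Exit */
};
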

◆ set_current_vmptr()

static void set_current_vmptr ( struct vcpu_vmx vmx,
gpa_t  vmptr 
)
static

Definition at line 5590 of file nested.c.

5591 {
5592  vmx->nested.current_vmptr = vmptr;
5593  if (enable_shadow_vmcs) {
5594  secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5595  vmcs_write64(VMCS_LINK_POINTER,
5596  __pa(vmx->vmcs01.shadow_vmcs));
5597  vmx->nested.need_vmcs12_to_shadow_sync = true;
5598  }
5599  vmx->nested.dirty_vmcs12 = true;
5600  vmx->nested.force_msr_bitmap_recalc = true;
5601 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sync_vmcs02_to_vmcs12()

static void sync_vmcs02_to_vmcs12 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 4372 of file nested.c.

4373 {
4374  struct vcpu_vmx *vmx = to_vmx(vcpu);
4375 
4378 
4381 
4384 
4387  vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4388 
4389  vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4390  vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4391 
4393  vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
4394 
4395  if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4396  vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4397  else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4398  vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
4399  else
4400  vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4401 
4403  vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4404  !vmx->nested.nested_run_pending)
4407 
4408  /*
4409  * In some cases (usually, nested EPT), L2 is allowed to change its
4410  * own CR3 without exiting. If it has changed it, we must keep it.
4411  * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4412  * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4413  *
4414  * Additionally, restore L2's PDPTR to vmcs12.
4415  */
4416  if (enable_ept) {
4417  vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4419  vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4420  vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4421  vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4422  vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4423  }
4424  }
4425 
4426  vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4427 
4429  vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4430 
4432  (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4433  (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4434 
4435  if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4436  kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4437 
4438  if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4439  vmcs12->guest_ia32_efer = vcpu->arch.efer;
4440 }
static unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
u32 vmx_preemption_timer_value
Definition: vmcs12.h:167
static unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:3752
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
Definition: nested.c:4237
static unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Definition: nested.c:3762
static __always_inline u16 vmcs_read16(unsigned long field)
Definition: vmx_ops.h:153
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
Definition: x86.c:1402
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sync_vmcs02_to_vmcs12_rare()

static void sync_vmcs02_to_vmcs12_rare ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
static

Definition at line 4298 of file nested.c.

4300 {
4301  struct vcpu_vmx *vmx = to_vmx(vcpu);
4302 
4303  vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4304  vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4305  vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4306  vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4307  vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4308  vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4309  vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4310  vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4311  vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4312  vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4313  vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4314  vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4315  vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4316  vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4317  vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4318  vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4319  vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4320  vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4321  vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4322  vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4323  vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4324  vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4325  vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4326  vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4327  vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4328  vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4329  vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4330  vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4331  vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4332  vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4333  vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4334  vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4335  vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4336  vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4338  vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4339 
4341 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmcs12_guest_cr0()

static unsigned long vmcs12_guest_cr0 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
inlinestatic

Definition at line 3752 of file nested.c.

3753 {
3754  return
3755  /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3756  /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3757  /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3758  vcpu->arch.cr0_guest_owned_bits));
3759 }
Here is the call graph for this function:
Here is the caller graph for this function:
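
The three ORed terms implement a three-way merge: bits L2 owns come from hardware CR0, bits L1 intercepts come from vmcs12's guest_cr0, and everything else comes from the CR0 read shadow. A standalone restatement with descriptive stand-in parameter names (not KVM types):

#include <stdint.h>

/*
 * Illustrative recombination of L2's CR0 as L1 should see it, in the
 * spirit of vmcs12_guest_cr0(). 'l2_owned_bits' plays the role of
 * cr0_guest_owned_bits and 'l1_mask' the role of cr0_guest_host_mask.
 */
static uint64_t effective_guest_cr0(uint64_t hw_cr0, uint64_t read_shadow,
                                    uint64_t vmcs12_guest_cr0,
                                    uint64_t l2_owned_bits, uint64_t l1_mask)
{
        return (hw_cr0 & l2_owned_bits) |
               (vmcs12_guest_cr0 & l1_mask) |
               (read_shadow & ~(l1_mask | l2_owned_bits));
}
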

◆ vmcs12_guest_cr4()

static unsigned long vmcs12_guest_cr4 ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12 
)
inlinestatic

Definition at line 3762 of file nested.c.

3763 {
3764  return
3765  /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3766  /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3767  /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3768  vcpu->arch.cr4_guest_owned_bits));
3769 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmcs12_save_pending_event()

static void vmcs12_save_pending_event ( struct kvm_vcpu *  vcpu,
struct vmcs12 vmcs12,
u32  vm_exit_reason,
u32  exit_intr_info 
)
static

Definition at line 3771 of file nested.c.

3774 {
3775  u32 idt_vectoring;
3776  unsigned int nr;
3777 
3778  /*
3779  * Per the SDM, VM-Exits due to double and triple faults are never
3780  * considered to occur during event delivery, even if the double/triple
3781  * fault is the result of an escalating vectoring issue.
3782  *
3783  * Note, the SDM qualifies the double fault behavior with "The original
3784  * event results in a double-fault exception". It's unclear why the
3785  * qualification exists since exits due to double fault can occur only
3786  * while vectoring a different exception (injected events are never
3787  * subject to interception), i.e. there's _always_ an original event.
3788  *
3789  * The SDM also uses NMI as a confusing example for the "original event
3790  * causes the VM exit directly" clause. NMI isn't special in any way,
3791  * the same rule applies to all events that cause an exit directly.
3792  * NMI is an odd choice for the example because NMIs can only occur on
3793  * instruction boundaries, i.e. they _can't_ occur during vectoring.
3794  */
3795  if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3796  ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3797  is_double_fault(exit_intr_info))) {
3799  } else if (vcpu->arch.exception.injected) {
3800  nr = vcpu->arch.exception.vector;
3801  idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3802 
3803  if (kvm_exception_is_soft(nr)) {
3805  vcpu->arch.event_exit_inst_len;
3806  idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3807  } else
3808  idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3809 
3810  if (vcpu->arch.exception.has_error_code) {
3811  idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3813  vcpu->arch.exception.error_code;
3814  }
3815 
3816  vmcs12->idt_vectoring_info_field = idt_vectoring;
3817  } else if (vcpu->arch.nmi_injected) {
3819  INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3820  } else if (vcpu->arch.interrupt.injected) {
3821  nr = vcpu->arch.interrupt.nr;
3822  idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3823 
3824  if (vcpu->arch.interrupt.soft) {
3825  idt_vectoring |= INTR_TYPE_SOFT_INTR;
3827  vcpu->arch.event_exit_inst_len;
3828  } else
3829  idt_vectoring |= INTR_TYPE_EXT_INTR;
3830 
3831  vmcs12->idt_vectoring_info_field = idt_vectoring;
3832  } else {
3834  }
3835 }
static bool is_double_fault(u32 intr_info)
Definition: vmcs.h:108
Here is the call graph for this function:
Here is the caller graph for this function:
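
The value written into idt_vectoring_info_field follows the architectural interruption-information layout: vector in bits 7:0, type in bits 10:8, deliver-error-code in bit 11, valid in bit 31. A sketch of the exception case (the interrupt and NMI branches differ only in the type bits); the macro names below are illustrative, the bit positions are architectural:

#include <stdint.h>
#include <stdbool.h>

#define VECT_TYPE_HARD_EXCEPT (3u << 8)
#define VECT_TYPE_SOFT_EXCEPT (6u << 8)
#define VECT_DELIVER_CODE     (1u << 11)
#define VECT_VALID            (1u << 31)

/*
 * Build what vmcs12_save_pending_event() stores for a pending/injected
 * exception that must be re-delivered by L1 after the VM-Exit.
 */
static uint32_t encode_pending_exception(uint8_t vector, bool is_soft,
                                         bool has_error_code)
{
        uint32_t info = vector | VECT_VALID;

        info |= is_soft ? VECT_TYPE_SOFT_EXCEPT : VECT_TYPE_HARD_EXCEPT;
        if (has_error_code)
                info |= VECT_DELIVER_CODE;
        return info;
}
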

◆ vmx_calc_preemption_timer_value()

static u64 vmx_calc_preemption_timer_value ( struct kvm_vcpu *  vcpu)
static

Definition at line 2148 of file nested.c.

2149 {
2150  struct vcpu_vmx *vmx = to_vmx(vcpu);
2151  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2152 
2153  u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2154  VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2155 
2156  if (!vmx->nested.has_preemption_timer_deadline) {
2157  vmx->nested.preemption_timer_deadline =
2158  vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2159  vmx->nested.has_preemption_timer_deadline = true;
2160  }
2161  return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2162 }
u64 preemption_timer_deadline
Definition: vmx.h:211
Here is the call graph for this function:
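
The emulated preemption timer counts in units of the L1 TSC shifted right by VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (5, per the macro list for this file): the deadline is latched once from the vmcs12 timer value, and the remaining ticks are recomputed on demand. A self-contained sketch of that arithmetic (struct and names are illustrative):

#include <stdint.h>
#include <stdbool.h>

#define EMULATED_PREEMPTION_TIMER_RATE 5 /* timer ticks = L1 TSC >> 5 */

struct preempt_timer {
        bool     has_deadline;
        uint64_t deadline;       /* in scaled-TSC ticks */
};

/*
 * Sketch of vmx_calc_preemption_timer_value(): latch the deadline the
 * first time (vmcs12 timer value + current scaled TSC), then report how
 * many ticks remain relative to the current scaled TSC.
 */
static uint64_t remaining_preemption_ticks(struct preempt_timer *t,
                                           uint32_t vmcs12_timer_value,
                                           uint64_t l1_tsc_now)
{
        uint64_t scaled = l1_tsc_now >> EMULATED_PREEMPTION_TIMER_RATE;

        if (!t->has_deadline) {
                t->deadline = (uint64_t)vmcs12_timer_value + scaled;
                t->has_deadline = true;
        }
        return t->deadline - scaled;
}
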

◆ vmx_check_nested_events()

static int vmx_check_nested_events ( struct kvm_vcpu *  vcpu)
static

Definition at line 4098 of file nested.c.

4099 {
4100  struct kvm_lapic *apic = vcpu->arch.apic;
4101  struct vcpu_vmx *vmx = to_vmx(vcpu);
4102  /*
4103  * Only a pending nested run blocks a pending exception. If there is a
4104  * previously injected event, the pending exception occurred while said
4105  * event was being delivered and thus needs to be handled.
4106  */
4107  bool block_nested_exceptions = vmx->nested.nested_run_pending;
4108  /*
4109  * New events (not exceptions) are only recognized at instruction
4110  * boundaries. If an event needs reinjection, then KVM is handling a
4111  * VM-Exit that occurred _during_ instruction execution; new events are
4112  * blocked until the instruction completes.
4113  */
4114  bool block_nested_events = block_nested_exceptions ||
4116 
4117  if (lapic_in_kernel(vcpu) &&
4118  test_bit(KVM_APIC_INIT, &apic->pending_events)) {
4119  if (block_nested_events)
4120  return -EBUSY;
4122  clear_bit(KVM_APIC_INIT, &apic->pending_events);
4123  if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
4124  nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
4125 
4126  /* MTF is discarded if the vCPU is in WFS. */
4127  vmx->nested.mtf_pending = false;
4128  return 0;
4129  }
4130 
4131  if (lapic_in_kernel(vcpu) &&
4132  test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
4133  if (block_nested_events)
4134  return -EBUSY;
4135 
4136  clear_bit(KVM_APIC_SIPI, &apic->pending_events);
4137  if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
4138  nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
4139  apic->sipi_vector & 0xFFUL);
4140  return 0;
4141  }
4142  /* Fallthrough, the SIPI is completely ignored. */
4143  }
4144 
4145  /*
4146  * Process exceptions that are higher priority than Monitor Trap Flag:
4147  * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but
4148  * could theoretically come in from userspace), and ICEBP (INT1).
4149  *
4150  * TODO: SMIs have higher priority than MTF and trap-like #DBs (except
4151  * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF
4152  * across SMI/RSM as it should; that needs to be addressed in order to
4153  * prioritize SMI over MTF and trap-like #DBs.
4154  */
4155  if (vcpu->arch.exception_vmexit.pending &&
4156  !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) {
4157  if (block_nested_exceptions)
4158  return -EBUSY;
4159 
4161  return 0;
4162  }
4163 
4164  if (vcpu->arch.exception.pending &&
4165  !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) {
4166  if (block_nested_exceptions)
4167  return -EBUSY;
4168  goto no_vmexit;
4169  }
4170 
4171  if (vmx->nested.mtf_pending) {
4172  if (block_nested_events)
4173  return -EBUSY;
4175  nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
4176  return 0;
4177  }
4178 
4179  if (vcpu->arch.exception_vmexit.pending) {
4180  if (block_nested_exceptions)
4181  return -EBUSY;
4182 
4184  return 0;
4185  }
4186 
4187  if (vcpu->arch.exception.pending) {
4188  if (block_nested_exceptions)
4189  return -EBUSY;
4190  goto no_vmexit;
4191  }
4192 
4194  if (block_nested_events)
4195  return -EBUSY;
4196  nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
4197  return 0;
4198  }
4199 
4200  if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
4201  if (block_nested_events)
4202  return -EBUSY;
4203  goto no_vmexit;
4204  }
4205 
4206  if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
4207  if (block_nested_events)
4208  return -EBUSY;
4209  if (!nested_exit_on_nmi(vcpu))
4210  goto no_vmexit;
4211 
4212  nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
4213  NMI_VECTOR | INTR_TYPE_NMI_INTR |
4214  INTR_INFO_VALID_MASK, 0);
4215  /*
4216  * The NMI-triggered VM exit counts as injection:
4217  * clear this one and block further NMIs.
4218  */
4219  vcpu->arch.nmi_pending = 0;
4220  vmx_set_nmi_mask(vcpu, true);
4221  return 0;
4222  }
4223 
4225  if (block_nested_events)
4226  return -EBUSY;
4227  if (!nested_exit_on_intr(vcpu))
4228  goto no_vmexit;
4229  nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
4230  return 0;
4231  }
4232 
4233 no_vmexit:
4234  return vmx_complete_nested_posted_interrupt(vcpu);
4235 }
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
Definition: irq.c:98
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
Definition: lapic.h:186
#define KVM_APIC_INIT
Definition: lapic.h:12
#define KVM_APIC_SIPI
Definition: lapic.h:13
static bool is_smm(struct kvm_vcpu *vcpu)
Definition: smm.h:160
struct kvm_vcpu * vcpu
Definition: lapic.h:64
unsigned int sipi_vector
Definition: lapic.h:82
unsigned long pending_events
Definition: lapic.h:81
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
Definition: svm.h:586
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
Definition: nested.c:3859
static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex)
Definition: nested.c:3982
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
Definition: nested.c:3901
static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
Definition: nested.c:4003
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
Definition: nested.c:3994
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
Definition: vmx.c:5050
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
Definition: vmx.c:5005
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
Definition: vmx.c:5025
static bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
Definition: x86.h:127
Here is the call graph for this function:

◆ vmx_complete_nested_posted_interrupt()

static int vmx_complete_nested_posted_interrupt ( struct kvm_vcpu *  vcpu)
static

Definition at line 3859 of file nested.c.

3860 {
3861  struct vcpu_vmx *vmx = to_vmx(vcpu);
3862  int max_irr;
3863  void *vapic_page;
3864  u16 status;
3865 
3866  if (!vmx->nested.pi_pending)
3867  return 0;
3868 
3869  if (!vmx->nested.pi_desc)
3870  goto mmio_needed;
3871 
3872  vmx->nested.pi_pending = false;
3873 
3874  if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3875  return 0;
3876 
3877  max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3878  if (max_irr != 256) {
3879  vapic_page = vmx->nested.virtual_apic_map.hva;
3880  if (!vapic_page)
3881  goto mmio_needed;
3882 
3883  __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3884  vapic_page, &max_irr);
3885  status = vmcs_read16(GUEST_INTR_STATUS);
3886  if ((u8)max_irr > ((u8)status & 0xff)) {
3887  status &= ~0xff;
3888  status |= (u8)max_irr;
3889  vmcs_write16(GUEST_INTR_STATUS, status);
3890  }
3891  }
3892 
3893  nested_mark_vmcs12_pages_dirty(vcpu);
3894  return 0;
3895 
3896 mmio_needed:
3898  return -ENXIO;
3899 }
#define X86EMUL_IO_NEEDED
Definition: kvm_emulate.h:88
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
Definition: lapic.c:654
static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
Definition: posted_intr.h:39
u32 pir[8]
Definition: posted_intr.h:12
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
Definition: nested.c:3838
Here is the call graph for this function:
Here is the caller graph for this function:
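
The core of the function is scanning the 256-bit posted-interrupt request bitmap for the highest pending vector and folding it into the low byte (RVI) of the guest interrupt status. A standalone sketch of that scan and merge (names are illustrative; the real code uses find_last_bit() and vmcs_read16/vmcs_write16 on GUEST_INTR_STATUS):

#include <stdint.h>

/*
 * Find the highest vector set in a 256-bit PIR and merge it into the
 * RVI byte of the guest interrupt status, returning the updated status.
 */
static uint16_t merge_pir_into_intr_status(const uint32_t pir[8],
                                           uint16_t guest_intr_status)
{
        int max_irr = -1;

        for (int word = 7; word >= 0 && max_irr < 0; word--)
                for (int bit = 31; bit >= 0; bit--)
                        if (pir[word] & (1u << bit)) {
                                max_irr = word * 32 + bit;
                                break;
                        }

        if (max_irr < 0)
                return guest_intr_status; /* nothing pending */

        if ((uint8_t)max_irr > (uint8_t)(guest_intr_status & 0xff))
                guest_intr_status = (guest_intr_status & ~0xffu) | (uint8_t)max_irr;
        return guest_intr_status;
}
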

◆ vmx_control_msr()

static u64 vmx_control_msr ( u32  low,
u32  high 
)
inlinestatic

Definition at line 215 of file nested.c.

216 {
217  return low | ((u64)high << 32);
218 }
Here is the caller graph for this function:

◆ vmx_control_verify()

static bool vmx_control_verify ( u32  control,
u32  low,
u32  high 
)
inlinestatic

Definition at line 210 of file nested.c.

211 {
212  return fixed_bits_valid(control, low, high);
213 }
static bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
Definition: nested.h:252
u64 control
Definition: posted_intr.h:16
Here is the call graph for this function:
Here is the caller graph for this function:
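
Together with vmx_control_msr() above, this reflects how VMX capability MSRs are encoded: the low 32 bits give the allowed 0-settings (bits that must be 1 in the control) and the high 32 bits give the allowed 1-settings (bits that may be 1). A small sketch of the packing and the validity check, equivalent in shape to fixed_bits_valid() (function names here are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Pack a VMX capability MSR: allowed-0 settings low, allowed-1 high. */
static uint64_t pack_control_msr(uint32_t low, uint32_t high)
{
        return (uint64_t)low | ((uint64_t)high << 32);
}

/*
 * A control word is acceptable when every must-be-one bit ('low') is
 * set and no bit outside the may-be-one mask ('high') is set.
 */
static bool control_valid(uint32_t control, uint32_t low, uint32_t high)
{
        return (control & low) == low && (control & ~high) == 0;
}
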

◆ vmx_disable_shadow_vmcs()

static void vmx_disable_shadow_vmcs ( struct vcpu_vmx vmx)
static

Definition at line 220 of file nested.c.

221 {
222  secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
223  vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
224  vmx->nested.need_vmcs12_to_shadow_sync = false;
225 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_get_control_msr()

static void vmx_get_control_msr ( struct nested_vmx_msrs msrs,
u32  msr_index,
u32 **  low,
u32 **  high 
)
static

Definition at line 1259 of file nested.c.

1261 {
1262  switch (msr_index) {
1263  case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1264  *low = &msrs->pinbased_ctls_low;
1265  *high = &msrs->pinbased_ctls_high;
1266  break;
1267  case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1268  *low = &msrs->procbased_ctls_low;
1269  *high = &msrs->procbased_ctls_high;
1270  break;
1271  case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1272  *low = &msrs->exit_ctls_low;
1273  *high = &msrs->exit_ctls_high;
1274  break;
1275  case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1276  *low = &msrs->entry_ctls_low;
1277  *high = &msrs->entry_ctls_high;
1278  break;
1279  case MSR_IA32_VMX_PROCBASED_CTLS2:
1280  *low = &msrs->secondary_ctls_low;
1281  *high = &msrs->secondary_ctls_high;
1282  break;
1283  default:
1284  BUG();
1285  }
1286 }
Here is the caller graph for this function:

◆ vmx_get_fixed0_msr()

static u64* vmx_get_fixed0_msr ( struct nested_vmx_msrs msrs,
u32  msr_index 
)
static

Definition at line 1361 of file nested.c.

1362 {
1363  switch (msr_index) {
1364  case MSR_IA32_VMX_CR0_FIXED0:
1365  return &msrs->cr0_fixed0;
1366  case MSR_IA32_VMX_CR4_FIXED0:
1367  return &msrs->cr4_fixed0;
1368  default:
1369  BUG();
1370  }
1371 }
Here is the caller graph for this function:

◆ vmx_get_nested_state()

static int vmx_get_nested_state ( struct kvm_vcpu *  vcpu,
struct kvm_nested_state __user *  user_kvm_nested_state,
u32  user_data_size 
)
static

Definition at line 6445 of file nested.c.

6448 {
6449  struct vcpu_vmx *vmx;
6450  struct vmcs12 *vmcs12;
6451  struct kvm_nested_state kvm_state = {
6452  .flags = 0,
6453  .format = KVM_STATE_NESTED_FORMAT_VMX,
6454  .size = sizeof(kvm_state),
6455  .hdr.vmx.flags = 0,
6456  .hdr.vmx.vmxon_pa = INVALID_GPA,
6457  .hdr.vmx.vmcs12_pa = INVALID_GPA,
6458  .hdr.vmx.preemption_timer_deadline = 0,
6459  };
6460  struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6461  &user_kvm_nested_state->data.vmx[0];
6462 
6463  if (!vcpu)
6464  return kvm_state.size + sizeof(*user_vmx_nested_state);
6465 
6466  vmx = to_vmx(vcpu);
6467  vmcs12 = get_vmcs12(vcpu);
6468 
6469  if (guest_can_use(vcpu, X86_FEATURE_VMX) &&
6470  (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6471  kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6472  kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6473 
6474  if (vmx_has_valid_vmcs12(vcpu)) {
6475  kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6476 
6477  /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6478  if (nested_vmx_is_evmptr12_set(vmx))
6479  kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6480 
6481  if (is_guest_mode(vcpu) &&
6483  vmcs12->vmcs_link_pointer != INVALID_GPA)
6484  kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6485  }
6486 
6487  if (vmx->nested.smm.vmxon)
6488  kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6489 
6490  if (vmx->nested.smm.guest_mode)
6491  kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6492 
6493  if (is_guest_mode(vcpu)) {
6494  kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6495 
6496  if (vmx->nested.nested_run_pending)
6497  kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6498 
6499  if (vmx->nested.mtf_pending)
6500  kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6501 
6502  if (nested_cpu_has_preemption_timer(vmcs12) &&
6503  vmx->nested.has_preemption_timer_deadline) {
6504  kvm_state.hdr.vmx.flags |=
6505  KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6506  kvm_state.hdr.vmx.preemption_timer_deadline =
6507  vmx->nested.preemption_timer_deadline;
6508  }
6509  }
6510  }
6511 
6512  if (user_data_size < kvm_state.size)
6513  goto out;
6514 
6515  if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6516  return -EFAULT;
6517 
6518  if (!vmx_has_valid_vmcs12(vcpu))
6519  goto out;
6520 
6521  /*
6522  * When running L2, the authoritative vmcs12 state is in the
6523  * vmcs02. When running L1, the authoritative vmcs12 state is
6524  * in the shadow or enlightened vmcs linked to vmcs01, unless
6525  * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
6526  * vmcs12 state is in the vmcs12 already.
6527  */
6528  if (is_guest_mode(vcpu)) {
6529  sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6530  sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6531  } else {
6532  copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6533  if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6534  if (nested_vmx_is_evmptr12_valid(vmx))
6535  /*
6536  * L1 hypervisor is not obliged to keep eVMCS
6537  * clean fields data always up-to-date while
6538  * not in guest mode, 'hv_clean_fields' is only
6539  * supposed to be actual upon vmentry so we need
6540  * to ignore it here and do full copy.
6541  */
6542  copy_enlightened_to_vmcs12(vmx, 0);
6543  else if (enable_shadow_vmcs)
6544  copy_shadow_to_vmcs12(vmx);
6545  }
6546  }
6547 
6548  BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6549  BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6550 
6551  /*
6552  * Copy over the full allocated size of vmcs12 rather than just the size
6553  * of the struct.
6554  */
6555  if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6556  return -EFAULT;
6557 
6558  if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6559  vmcs12->vmcs_link_pointer != INVALID_GPA) {
6560  if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6561  get_shadow_vmcs12(vcpu), VMCS12_SIZE))
6562  return -EFAULT;
6563  }
6564 out:
6565  return kvm_state.size;
6566 }
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:278
static int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
Definition: nested.h:55
bool guest_mode
Definition: vmx.h:241
Here is the call graph for this function:
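
The "if (!vcpu)" branch above is also how KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) learns the maximum state size it should report. As a minimal, illustrative userspace sketch of driving this handler through the KVM_GET_NESTED_STATE ioctl (helper name invented, error handling abbreviated):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch a vCPU's nested state into a caller-owned buffer. */
static struct kvm_nested_state *fetch_nested_state(int vm_fd, int vcpu_fd)
{
        int max_size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
        struct kvm_nested_state *state;

        if (max_size <= 0)
                return NULL;            /* nested state not supported */

        state = calloc(1, max_size);
        if (!state)
                return NULL;

        state->size = max_size;         /* tell the kernel how large the buffer is */
        if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
                free(state);
                return NULL;
        }
        return state;                   /* state->size now holds the bytes actually written */
}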

◆ vmx_get_nested_state_pages()

static bool vmx_get_nested_state_pages ( struct kvm_vcpu *  vcpu)
static

Definition at line 3320 of file nested.c.

3321 {
3322 #ifdef CONFIG_KVM_HYPERV
3323  /*
3324  * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
3325  * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
3326  * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post
3327  * migration.
3328  */
3329  if (!nested_get_evmcs_page(vcpu)) {
3330  pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3331  __func__);
3332  vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3333  vcpu->run->internal.suberror =
3334  KVM_INTERNAL_ERROR_EMULATION;
3335  vcpu->run->internal.ndata = 0;
3336 
3337  return false;
3338  }
3339 #endif
3340 
3341  if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3342  return false;
3343 
3344  return true;
3345 }
Here is the call graph for this function:

◆ vmx_get_pending_dbg_trap()

static unsigned long vmx_get_pending_dbg_trap ( struct kvm_queued_exception *  ex)
static

Definition at line 3967 of file nested.c.

3968 {
3969  if (!ex->pending || ex->vector != DB_VECTOR)
3970  return 0;
3971 
3972  /* General Detect #DBs are always fault-like. */
3973  return ex->payload & ~DR6_BD;
3974 }
Here is the caller graph for this function:

◆ vmx_get_preemption_timer_value()

static u32 vmx_get_preemption_timer_value ( struct kvm_vcpu *  vcpu)
static

Definition at line 4237 of file nested.c.

4238 {
4239  ktime_t remaining =
4240  hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4241  u64 value;
4242 
4243  if (ktime_to_ns(remaining) <= 0)
4244  return 0;
4245 
4246  value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4247  do_div(value, 1000000);
4248  return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
4249 }
Here is the call graph for this function:
Here is the caller graph for this function:
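
The arithmetic above turns the hrtimer's remaining wall-clock time back into VMX-preemption-timer units: nanoseconds are scaled by virtual_tsc_khz / 1e6 to obtain guest TSC cycles, and the final shift by VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE reflects that the emulated timer ticks once every 2^5 TSC cycles. A standalone illustration of the same computation (the helper name and the 2 GHz guest TSC are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Convert remaining nanoseconds into emulated VMX-preemption-timer units. */
static uint32_t remaining_ns_to_timer_units(uint64_t remaining_ns, uint64_t tsc_khz)
{
        uint64_t tsc_cycles = remaining_ns * tsc_khz / 1000000;        /* ns -> TSC cycles */

        return tsc_cycles >> 5;         /* one timer unit == 2^5 TSC cycles */
}

int main(void)
{
        /* 1 ms left on the hrtimer, guest TSC running at 2 GHz (2,000,000 kHz). */
        printf("%u\n", remaining_ns_to_timer_units(1000000, 2000000));  /* prints 62500 */
        return 0;
}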

◆ vmx_get_vmx_msr()

int vmx_get_vmx_msr ( struct nested_vmx_msrs *  msrs,
u32  msr_index,
u64 *  pdata 
)

Definition at line 1458 of file nested.c.

1459 {
1460  switch (msr_index) {
1461  case MSR_IA32_VMX_BASIC:
1462  *pdata = msrs->basic;
1463  break;
1464  case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1465  case MSR_IA32_VMX_PINBASED_CTLS:
1466  *pdata = vmx_control_msr(
1467  msrs->pinbased_ctls_low,
1468  msrs->pinbased_ctls_high);
1469  if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1470  *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1471  break;
1472  case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1473  case MSR_IA32_VMX_PROCBASED_CTLS:
1474  *pdata = vmx_control_msr(
1475  msrs->procbased_ctls_low,
1476  msrs->procbased_ctls_high);
1477  if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1478  *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1479  break;
1480  case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1481  case MSR_IA32_VMX_EXIT_CTLS:
1482  *pdata = vmx_control_msr(
1483  msrs->exit_ctls_low,
1484  msrs->exit_ctls_high);
1485  if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1486  *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1487  break;
1488  case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1489  case MSR_IA32_VMX_ENTRY_CTLS:
1490  *pdata = vmx_control_msr(
1491  msrs->entry_ctls_low,
1492  msrs->entry_ctls_high);
1493  if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1494  *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1495  break;
1496  case MSR_IA32_VMX_MISC:
1497  *pdata = vmx_control_msr(
1498  msrs->misc_low,
1499  msrs->misc_high);
1500  break;
1501  case MSR_IA32_VMX_CR0_FIXED0:
1502  *pdata = msrs->cr0_fixed0;
1503  break;
1504  case MSR_IA32_VMX_CR0_FIXED1:
1505  *pdata = msrs->cr0_fixed1;
1506  break;
1507  case MSR_IA32_VMX_CR4_FIXED0:
1508  *pdata = msrs->cr4_fixed0;
1509  break;
1510  case MSR_IA32_VMX_CR4_FIXED1:
1511  *pdata = msrs->cr4_fixed1;
1512  break;
1513  case MSR_IA32_VMX_VMCS_ENUM:
1514  *pdata = msrs->vmcs_enum;
1515  break;
1516  case MSR_IA32_VMX_PROCBASED_CTLS2:
1517  *pdata = vmx_control_msr(
1518  msrs->secondary_ctls_low,
1519  msrs->secondary_ctls_high);
1520  break;
1521  case MSR_IA32_VMX_EPT_VPID_CAP:
1522  *pdata = msrs->ept_caps |
1523  ((u64)msrs->vpid_caps << 32);
1524  break;
1525  case MSR_IA32_VMX_VMFUNC:
1526  *pdata = msrs->vmfunc_controls;
1527  break;
1528  default:
1529  return 1;
1530  }
1531 
1532  return 0;
1533 }
Here is the call graph for this function:
Here is the caller graph for this function:
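
Each of the pin-based/proc-based/exit/entry/secondary control MSRs reported here packs the allowed-0 settings (bits that must be 1 in the corresponding VMCS control) into bits 31:0 and the allowed-1 settings (bits that may be 1) into bits 63:32; that is what vmx_control_msr() assembles. An illustrative re-statement of the packing and of the consistency test an L1-programmed control must pass (function names invented; compare vmx_control_verify() in this file):

#include <stdint.h>

/* low = allowed-0 settings (must-be-1 bits), high = allowed-1 settings. */
static uint64_t control_msr(uint32_t low, uint32_t high)
{
        return (uint64_t)high << 32 | low;
}

/* A control value is acceptable when every must-be-1 bit is set and no
 * bit outside the allowed-1 mask is set. */
static int control_ok(uint32_t ctl, uint32_t low, uint32_t high)
{
        return (ctl & low) == low && (ctl & ~high) == 0;
}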

◆ vmx_has_apicv_interrupt()

static u8 vmx_has_apicv_interrupt ( struct kvm_vcpu *  vcpu)
static

Definition at line 3406 of file nested.c.

3407 {
3408  u8 rvi = vmx_get_rvi();
3409  u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3410 
3411  return ((rvi & 0xf0) > (vppr & 0xf0));
3412 }
static u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
Definition: lapic.h:179
static u8 vmx_get_rvi(void)
Definition: vmx.h:466
Here is the call graph for this function:
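
RVI (the highest requested virtual interrupt vector) and VPPR (the virtual processor priority register) are compared by priority class only, i.e. by their upper nibble. A small standalone illustration (helper name invented):

#include <stdint.h>

/* An interrupt can be delivered through APICv when the priority class
 * (vector >> 4) of the highest requested vector exceeds the current
 * processor-priority class. */
static int apicv_interrupt_deliverable(uint8_t rvi, uint8_t vppr)
{
        return (rvi & 0xf0) > (vppr & 0xf0);
}
/* e.g. rvi = 0x51 (class 5) vs. vppr = 0x40 (class 4) -> deliverable;
 *      rvi = 0x51 vs. vppr = 0x50 (same class)        -> not deliverable. */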

◆ vmx_has_nested_events()

static bool vmx_has_nested_events ( struct kvm_vcpu *  vcpu)
static

Definition at line 4009 of file nested.c.

4010 {
4011  return nested_vmx_preemption_timer_pending(vcpu) ||
4012  to_vmx(vcpu)->nested.mtf_pending;
4013 }
Here is the call graph for this function:

◆ vmx_is_low_priority_db_trap()

static bool vmx_is_low_priority_db_trap ( struct kvm_queued_exception *  ex)
static

Definition at line 3982 of file nested.c.

3983 {
3984  return vmx_get_pending_dbg_trap(ex) & ~DR6_BT;
3985 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_leave_nested()

void vmx_leave_nested ( struct kvm_vcpu *  vcpu)

Definition at line 6568 of file nested.c.

6569 {
6570  if (is_guest_mode(vcpu)) {
6571  to_vmx(vcpu)->nested.nested_run_pending = 0;
6572  nested_vmx_vmexit(vcpu, -1, 0, 0);
6573  }
6574  free_nested(vcpu);
6575 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_preemption_timer_fn()

static enum hrtimer_restart vmx_preemption_timer_fn ( struct hrtimer *  timer)
static

Definition at line 2124 of file nested.c.

2137 {
2138  struct vcpu_vmx *vmx =
2139  container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2140 
2141  vmx->nested.preemption_timer_expired = true;
2142  kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2143  kvm_vcpu_kick(&vmx->vcpu);
2144 
2145  return HRTIMER_NORESTART;
2146 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
Here is the caller graph for this function:

◆ vmx_restore_control_msr()

static int vmx_restore_control_msr ( struct vcpu_vmx *  vmx,
u32  msr_index,
u64  data 
)
static

Definition at line 1289 of file nested.c.

1290 {
1291  u32 *lowp, *highp;
1292  u64 supported;
1293 
1294  vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1295 
1296  supported = vmx_control_msr(*lowp, *highp);
1297 
1298  /* Check must-be-1 bits are still 1. */
1299  if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1300  return -EINVAL;
1301 
1302  /* Check must-be-0 bits are still 0. */
1303  if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1304  return -EINVAL;
1305 
1306  vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1307  *lowp = data;
1308  *highp = data >> 32;
1309  return 0;
1310 }
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
Definition: nested.c:1221
static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u32 **low, u32 **high)
Definition: nested.c:1259
Here is the call graph for this function:
Here is the caller graph for this function:
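
The two subset checks only let userspace tighten, never loosen, what KVM itself advertises: every must-be-1 bit (low half) reported by KVM must remain set in the restored value, and the restored value must not allow any bit (high half) that KVM does not. A standalone sketch of the same logic, reusing the is_bitwise_subset() definition referenced above (the wrapper name is invented for the example):

#include <stdint.h>

static int is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
        superset &= mask;
        subset &= mask;

        return (superset | subset) == superset;
}

/* 'data' is the value userspace wants to restore, 'supported' the value KVM
 * reports for the same control MSR (bits 31:0 = must-be-1, bits 63:32 = allowed-1). */
static int control_msr_restorable(uint64_t data, uint64_t supported)
{
        /* Must-be-1 bits may be added by userspace but never removed ... */
        if (!is_bitwise_subset(data, supported, 0xffffffffULL))
                return 0;
        /* ... and allowed-1 bits may be removed but never added. */
        if (!is_bitwise_subset(supported, data, 0xffffffffULL << 32))
                return 0;
        return 1;
}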

◆ vmx_restore_fixed0_msr()

static int vmx_restore_fixed0_msr ( struct vcpu_vmx *  vmx,
u32  msr_index,
u64  data 
)
static

Definition at line 1373 of file nested.c.

1374 {
1375  const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1376 
1377  /*
1378  * 1 bits (which indicates bits which "must-be-1" during VMX operation)
1379  * must be 1 in the restored value.
1380  */
1381  if (!is_bitwise_subset(data, *msr, -1ULL))
1382  return -EINVAL;
1383 
1384  *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1385  return 0;
1386 }
static u64 * vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
Definition: nested.c:1361
Here is the call graph for this function:
Here is the caller graph for this function:
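
For background, the CR0/CR4 FIXED0 MSRs describe bits that must be 1 while in VMX operation and the FIXED1 MSRs describe bits that may be 1. A guest-written control register is consistent with them when (illustrative sketch, helper name invented):

#include <stdint.h>

/* CRx is legal under VMX when all FIXED0 bits are set and no bit outside FIXED1 is set. */
static int cr_fixed_bits_ok(uint64_t cr, uint64_t fixed0, uint64_t fixed1)
{
        return (cr & fixed0) == fixed0 && (cr & ~fixed1) == 0;
}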

◆ vmx_restore_vmx_basic()

static int vmx_restore_vmx_basic ( struct vcpu_vmx *  vmx,
u64  data 
)
static

Definition at line 1229 of file nested.c.

1230 {
1231  const u64 feature_and_reserved =
1232  /* feature (except bit 48; see below) */
1233  BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1234  /* reserved */
1235  BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1236  u64 vmx_basic = vmcs_config.nested.basic;
1237 
1238  if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1239  return -EINVAL;
1240 
1241  /*
1242  * KVM does not emulate a version of VMX that constrains physical
1243  * addresses of VMX structures (e.g. VMCS) to 32-bits.
1244  */
1245  if (data & BIT_ULL(48))
1246  return -EINVAL;
1247 
1248  if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1249  vmx_basic_vmcs_revision_id(data))
1250  return -EINVAL;
1251 
1252  if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1253  return -EINVAL;
1254 
1255  vmx->nested.msrs.basic = data;
1256  return 0;
1257 }
Here is the call graph for this function:
Here is the caller graph for this function:
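
The checks above pick apart the architectural layout of IA32_VMX_BASIC: roughly, bits 30:0 hold the VMCS revision identifier, bits 44:32 the VMCS region size, bit 48 indicates that VMX structures are limited to 32-bit physical addresses, and bit 55 advertises the "true" control MSRs. A hedged sketch of the field extraction used above (helper names invented, bit positions per the SDM, Appendix A.1):

#include <stdint.h>

static uint32_t basic_vmcs_revision_id(uint64_t vmx_basic)
{
        return vmx_basic & 0x7fffffff;          /* bits 30:0 */
}

static uint32_t basic_vmcs_size(uint64_t vmx_basic)
{
        return (vmx_basic >> 32) & 0x1fff;      /* bits 44:32 */
}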

◆ vmx_restore_vmx_ept_vpid_cap()

static int vmx_restore_vmx_ept_vpid_cap ( struct vcpu_vmx *  vmx,
u64  data 
)
static

Definition at line 1347 of file nested.c.

1348 {
1349  u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1350  vmcs_config.nested.vpid_caps);
1351 
1352  /* Every bit is either reserved or a feature bit. */
1353  if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1354  return -EINVAL;
1355 
1356  vmx->nested.msrs.ept_caps = data;
1357  vmx->nested.msrs.vpid_caps = data >> 32;
1358  return 0;
1359 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_restore_vmx_misc()

static int vmx_restore_vmx_misc ( struct vcpu_vmx *  vmx,
u64  data 
)
static

Definition at line 1312 of file nested.c.

1313 {
1314  const u64 feature_and_reserved_bits =
1315  /* feature */
1316  BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1317  BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1318  /* reserved */
1319  GENMASK_ULL(13, 9) | BIT_ULL(31);
1320  u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1321  vmcs_config.nested.misc_high);
1322 
1323  if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1324  return -EINVAL;
1325 
1326  if ((vmx->nested.msrs.pinbased_ctls_high &
1327  PIN_BASED_VMX_PREEMPTION_TIMER) &&
1328  vmx_misc_preemption_timer_rate(data) !=
1329  vmx_misc_preemption_timer_rate(vmx_misc))
1330  return -EINVAL;
1331 
1332  if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1333  return -EINVAL;
1334 
1335  if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1336  return -EINVAL;
1337 
1338  if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1339  return -EINVAL;
1340 
1341  vmx->nested.msrs.misc_low = data;
1342  vmx->nested.msrs.misc_high = data >> 32;
1343 
1344  return 0;
1345 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_set_nested_state()

static int vmx_set_nested_state ( struct kvm_vcpu *  vcpu,
struct kvm_nested_state __user *  user_kvm_nested_state,
struct kvm_nested_state *  kvm_state 
)
static

Definition at line 6577 of file nested.c.

6580 {
6581  struct vcpu_vmx *vmx = to_vmx(vcpu);
6582  struct vmcs12 *vmcs12;
6583  enum vm_entry_failure_code ignored;
6584  struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6585  &user_kvm_nested_state->data.vmx[0];
6586  int ret;
6587 
6588  if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6589  return -EINVAL;
6590 
6591  if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6592  if (kvm_state->hdr.vmx.smm.flags)
6593  return -EINVAL;
6594 
6595  if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
6596  return -EINVAL;
6597 
6598  /*
6599  * KVM_STATE_NESTED_EVMCS used to signal that KVM should
6600  * enable eVMCS capability on vCPU. However, since then
6601  * code was changed such that flag signals vmcs12 should
6602  * be copied into eVMCS in guest memory.
6603  *
6604  * To preserve backwards compatibility, allow user
6605  * to set this flag even when there is no VMXON region.
6606  */
6607  if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6608  return -EINVAL;
6609  } else {
6610  if (!guest_can_use(vcpu, X86_FEATURE_VMX))
6611  return -EINVAL;
6612 
6613  if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6614  return -EINVAL;
6615  }
6616 
6617  if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6618  (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6619  return -EINVAL;
6620 
6621  if (kvm_state->hdr.vmx.smm.flags &
6622  ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6623  return -EINVAL;
6624 
6625  if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6626  return -EINVAL;
6627 
6628  /*
6629  * SMM temporarily disables VMX, so we cannot be in guest mode,
6630  * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
6631  * must be zero.
6632  */
6633  if (is_smm(vcpu) ?
6634  (kvm_state->flags &
6635  (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6636  : kvm_state->hdr.vmx.smm.flags)
6637  return -EINVAL;
6638 
6639  if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6640  !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6641  return -EINVAL;
6642 
6643  if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6644  (!guest_can_use(vcpu, X86_FEATURE_VMX) ||
6645  !vmx->nested.enlightened_vmcs_enabled))
6646  return -EINVAL;
6647 
6648  vmx_leave_nested(vcpu);
6649 
6650  if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
6651  return 0;
6652 
6653  vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6654  ret = enter_vmx_operation(vcpu);
6655  if (ret)
6656  return ret;
6657 
6658  /* Empty 'VMXON' state is permitted if no VMCS loaded */
6659  if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6660  /* See vmx_has_valid_vmcs12. */
6661  if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6662  (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
6663  (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
6664  return -EINVAL;
6665  else
6666  return 0;
6667  }
6668 
6669  if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6670  if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6671  !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
6672  return -EINVAL;
6673 
6674  set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
6675 #ifdef CONFIG_KVM_HYPERV
6676  } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6677  /*
6678  * nested_vmx_handle_enlightened_vmptrld() cannot be called
6679  * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
6680  * restored yet. EVMCS will be mapped from
6681  * nested_get_vmcs12_pages().
6682  */
6683  vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6684  kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
6685 #endif
6686  } else {
6687  return -EINVAL;
6688  }
6689 
6690  if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6691  vmx->nested.smm.vmxon = true;
6692  vmx->nested.vmxon = false;
6693 
6694  if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6695  vmx->nested.smm.guest_mode = true;
6696  }
6697 
6698  vmcs12 = get_vmcs12(vcpu);
6699  if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6700  return -EFAULT;
6701 
6702  if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6703  return -EINVAL;
6704 
6705  if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6706  return 0;
6707 
6708  vmx->nested.nested_run_pending =
6709  !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6710 
6711  vmx->nested.mtf_pending =
6712  !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6713 
6714  ret = -EINVAL;
6715  if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6716  vmcs12->vmcs_link_pointer != INVALID_GPA) {
6717  struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6718 
6719  if (kvm_state->size <
6720  sizeof(*kvm_state) +
6721  sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6722  goto error_guest_mode;
6723 
6724  if (copy_from_user(shadow_vmcs12,
6725  user_vmx_nested_state->shadow_vmcs12,
6726  sizeof(*shadow_vmcs12))) {
6727  ret = -EFAULT;
6728  goto error_guest_mode;
6729  }
6730 
6731  if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6732  !shadow_vmcs12->hdr.shadow_vmcs)
6733  goto error_guest_mode;
6734  }
6735 
6736  vmx->nested.has_preemption_timer_deadline = false;
6737  if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6738  vmx->nested.has_preemption_timer_deadline = true;
6739  vmx->nested.preemption_timer_deadline =
6740  kvm_state->hdr.vmx.preemption_timer_deadline;
6741  }
6742 
6743  if (nested_vmx_check_controls(vcpu, vmcs12) ||
6744  nested_vmx_check_host_state(vcpu, vmcs12) ||
6745  nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6746  goto error_guest_mode;
6747 
6748  vmx->nested.dirty_vmcs12 = true;
6749  vmx->nested.force_msr_bitmap_recalc = true;
6750  ret = nested_vmx_enter_non_root_mode(vcpu, false);
6751  if (ret)
6752  goto error_guest_mode;
6753 
6754  if (vmx->nested.mtf_pending)
6755  kvm_make_request(KVM_REQ_EVENT, vcpu);
6756 
6757  return 0;
6758 
6759 error_guest_mode:
6760  vmx->nested.nested_run_pending = 0;
6761  return ret;
6762 }
bool enlightened_vmcs_enabled
Definition: vmx.h:186
#define EVMPTR_MAP_PENDING
Definition: hyperv.h:10
Here is the call graph for this function:
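
As an illustrative counterpart to the KVM_GET_NESTED_STATE sketch shown earlier, restoring the saved blob is a single ioctl. Because vmx_set_vmx_msr() rejects changes once the vCPU is in VMX operation, the VMX capability MSRs should be restored before this call (sketch only, helper name invented):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* 'state' is assumed to be a buffer previously filled by KVM_GET_NESTED_STATE. */
static int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
        if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0) {
                perror("KVM_SET_NESTED_STATE");
                return -1;
        }
        return 0;
}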

◆ vmx_set_vmx_msr()

int vmx_set_vmx_msr ( struct kvm_vcpu *  vcpu,
u32  msr_index,
u64  data 
)

Definition at line 1393 of file nested.c.

1394 {
1395  struct vcpu_vmx *vmx = to_vmx(vcpu);
1396 
1397  /*
1398  * Don't allow changes to the VMX capability MSRs while the vCPU
1399  * is in VMX operation.
1400  */
1401  if (vmx->nested.vmxon)
1402  return -EBUSY;
1403 
1404  switch (msr_index) {
1405  case MSR_IA32_VMX_BASIC:
1406  return vmx_restore_vmx_basic(vmx, data);
1407  case MSR_IA32_VMX_PINBASED_CTLS:
1408  case MSR_IA32_VMX_PROCBASED_CTLS:
1409  case MSR_IA32_VMX_EXIT_CTLS:
1410  case MSR_IA32_VMX_ENTRY_CTLS:
1411  /*
1412  * The "non-true" VMX capability MSRs are generated from the
1413  * "true" MSRs, so we do not support restoring them directly.
1414  *
1415  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1416  * should restore the "true" MSRs with the must-be-1 bits
1417  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1418  * DEFAULT SETTINGS".
1419  */
1420  return -EINVAL;
1421  case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1422  case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1423  case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1424  case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1425  case MSR_IA32_VMX_PROCBASED_CTLS2:
1426  return vmx_restore_control_msr(vmx, msr_index, data);
1427  case MSR_IA32_VMX_MISC:
1428  return vmx_restore_vmx_misc(vmx, data);
1429  case MSR_IA32_VMX_CR0_FIXED0:
1430  case MSR_IA32_VMX_CR4_FIXED0:
1431  return vmx_restore_fixed0_msr(vmx, msr_index, data);
1432  case MSR_IA32_VMX_CR0_FIXED1:
1433  case MSR_IA32_VMX_CR4_FIXED1:
1434  /*
1435  * These MSRs are generated based on the vCPU's CPUID, so we
1436  * do not support restoring them directly.
1437  */
1438  return -EINVAL;
1439  case MSR_IA32_VMX_EPT_VPID_CAP:
1440  return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1441  case MSR_IA32_VMX_VMCS_ENUM:
1442  vmx->nested.msrs.vmcs_enum = data;
1443  return 0;
1444  case MSR_IA32_VMX_VMFUNC:
1445  if (data & ~vmcs_config.nested.vmfunc_controls)
1446  return -EINVAL;
1447  vmx->nested.msrs.vmfunc_controls = data;
1448  return 0;
1449  default:
1450  /*
1451  * The rest of the VMX capability MSRs do not support restore.
1452  */
1453  return -EINVAL;
1454  }
1455 }
static int vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
Definition: nested.c:1289
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
Definition: nested.c:1347
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
Definition: nested.c:1229
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
Definition: nested.c:1373
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
Definition: nested.c:1312
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_start_preemption_timer()

static void vmx_start_preemption_timer ( struct kvm_vcpu *  vcpu,
u64  preemption_timeout 
)
static

Definition at line 2164 of file nested.c.

2166 {
2167  struct vcpu_vmx *vmx = to_vmx(vcpu);
2168 
2169  /*
2170  * A timer value of zero is architecturally guaranteed to cause
2171  * a VMExit prior to executing any instructions in the guest.
2172  */
2173  if (preemption_timeout == 0) {
2174  vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2175  return;
2176  }
2177 
2178  if (vcpu->arch.virtual_tsc_khz == 0)
2179  return;
2180 
2181  preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2182  preemption_timeout *= 1000000;
2183  do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2184  hrtimer_start(&vmx->nested.preemption_timer,
2185  ktime_add_ns(ktime_get(), preemption_timeout),
2186  HRTIMER_MODE_ABS_PINNED);
2187 }
Here is the call graph for this function:
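
This performs the inverse of the conversion in vmx_get_preemption_timer_value(): the vmcs12 timer value, in units of 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE guest TSC cycles, is scaled to nanoseconds before the hrtimer is armed. An illustrative sketch with an invented helper name:

#include <stdint.h>

static uint64_t timer_units_to_ns(uint64_t timer_value, uint64_t tsc_khz)
{
        uint64_t tsc_cycles = timer_value << 5;         /* one unit == 2^5 TSC cycles */

        return tsc_cycles * 1000000 / tsc_khz;          /* TSC cycles -> ns */
}
/* timer_units_to_ns(62500, 2000000) == 1000000 ns, i.e. the 1 ms from the earlier example. */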

◆ vmx_switch_vmcs()

static void vmx_switch_vmcs ( struct kvm_vcpu *  vcpu,
struct loaded_vmcs *  vmcs 
)
static

Definition at line 294 of file nested.c.

295 {
296  struct vcpu_vmx *vmx = to_vmx(vcpu);
297  struct loaded_vmcs *prev;
298  int cpu;
299 
300  if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
301  return;
302 
303  cpu = get_cpu();
304  prev = vmx->loaded_vmcs;
305  vmx->loaded_vmcs = vmcs;
306  vmx_vcpu_load_vmcs(vcpu, cpu, prev);
307  vmx_sync_vmcs_host_state(vmx, prev);
308  put_cpu();
309 
310  vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
311 
312  /*
313  * All lazily updated registers will be reloaded from VMCS12 on both
314  * vmentry and vmexit.
315  */
316  vcpu->arch.regs_dirty = 0;
317 }
int cpu
Definition: vmcs.h:64
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, struct loaded_vmcs *prev)
Definition: nested.c:275
#define VMX_REGS_LAZY_LOAD_SET
Definition: vmx.h:623
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_sync_vmcs_host_state()

static void vmx_sync_vmcs_host_state ( struct vcpu_vmx *  vmx,
struct loaded_vmcs *  prev 
)
static

Definition at line 275 of file nested.c.

277 {
278  struct vmcs_host_state *dest, *src;
279 
280  if (unlikely(!vmx->guest_state_loaded))
281  return;
282 
283  src = &prev->host_state;
284  dest = &vmx->loaded_vmcs->host_state;
285 
286  vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
287  dest->ldt_sel = src->ldt_sel;
288 #ifdef CONFIG_X86_64
289  dest->ds_sel = src->ds_sel;
290  dest->es_sel = src->es_sel;
291 #endif
292 }
bool guest_state_loaded
Definition: vmx.h:263
u16 fs_sel
Definition: vmcs.h:41
unsigned long fs_base
Definition: vmcs.h:38
u16 ldt_sel
Definition: vmcs.h:41
unsigned long gs_base
Definition: vmcs.h:37
u16 gs_sel
Definition: vmcs.h:41
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, unsigned long fs_base, unsigned long gs_base)
Definition: vmx.c:1255
Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ enable_shadow_vmcs

bool __read_mostly enable_shadow_vmcs = 1
static

Definition at line 21 of file nested.c.

◆ max_shadow_read_only_fields

int max_shadow_read_only_fields
static
Initial value:
= ARRAY_SIZE(shadow_read_only_fields)

Definition at line 59 of file nested.c.

◆ max_shadow_read_write_fields

int max_shadow_read_write_fields
static
Initial value:
= ARRAY_SIZE(shadow_read_write_fields)

Definition at line 66 of file nested.c.

◆ nested_early_check

bool __read_mostly nested_early_check = 0
static

Definition at line 24 of file nested.c.

◆ shadow_read_only_fields

struct shadow_vmcs_field shadow_read_only_fields[]
static
Initial value:
= {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
}

Definition at line 46 of file nested.c.

◆ shadow_read_write_fields

struct shadow_vmcs_field shadow_read_write_fields[]
static
Initial value:
= {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
}

Definition at line 59 of file nested.c.

◆ vmx_bitmap

unsigned long* vmx_bitmap[VMX_BITMAP_NR]
static

Definition at line 46 of file nested.c.

◆ vmx_nested_ops

struct kvm_x86_nested_ops vmx_nested_ops
Initial value:
= {
.leave_nested = vmx_leave_nested,
.is_exception_vmexit = nested_vmx_is_exception_vmexit,
.check_events = vmx_check_nested_events,
.has_events = vmx_has_nested_events,
.triple_fault = nested_vmx_triple_fault,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
.get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
}
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
Definition: nested.c:3347
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
Definition: nested.c:3320
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
Definition: nested.c:4009
static int vmx_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size)
Definition: nested.c:6445
static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector, u32 error_code)
Definition: nested.c:482
static int vmx_set_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, struct kvm_nested_state *kvm_state)
Definition: nested.c:6577
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
Definition: nested.c:4098
static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
Definition: nested.c:4951

Definition at line 7092 of file nested.c.