KVM
nested.h File Reference
#include "kvm_cache_regs.h"
#include "hyperv.h"
#include "vmcs12.h"
#include "vmx.h"

Macros

#define nested_guest_cr4_valid   nested_cr4_valid
 
#define nested_host_cr4_valid   nested_cr4_valid
 

Enumerations

enum  nvmx_vmentry_status { NVMX_VMENTRY_SUCCESS , NVMX_VMENTRY_VMFAIL , NVMX_VMENTRY_VMEXIT , NVMX_VMENTRY_KVM_INTERNAL_ERROR }
 

Functions

void vmx_leave_nested (struct kvm_vcpu *vcpu)
 
void nested_vmx_setup_ctls_msrs (struct vmcs_config *vmcs_conf, u32 ept_caps)
 
void nested_vmx_hardware_unsetup (void)
 
__init int nested_vmx_hardware_setup (int(*exit_handlers[])(struct kvm_vcpu *))
 
void nested_vmx_set_vmcs_shadowing_bitmap (void)
 
void nested_vmx_free_vcpu (struct kvm_vcpu *vcpu)
 
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode (struct kvm_vcpu *vcpu, bool from_vmentry)
 
bool nested_vmx_reflect_vmexit (struct kvm_vcpu *vcpu)
 
void nested_vmx_vmexit (struct kvm_vcpu *vcpu, u32 vm_exit_reason, u32 exit_intr_info, unsigned long exit_qualification)
 
void nested_sync_vmcs12_to_shadow (struct kvm_vcpu *vcpu)
 
int vmx_set_vmx_msr (struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
int vmx_get_vmx_msr (struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
 
int get_vmx_mem_address (struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
 
void nested_mark_vmcs12_pages_dirty (struct kvm_vcpu *vcpu)
 
bool nested_vmx_check_io_bitmaps (struct kvm_vcpu *vcpu, unsigned int port, int size)
 
static struct vmcs12 * get_vmcs12 (struct kvm_vcpu *vcpu)
 
static struct vmcs12 * get_shadow_vmcs12 (struct kvm_vcpu *vcpu)
 
static int vmx_has_valid_vmcs12 (struct kvm_vcpu *vcpu)
 
static u16 nested_get_vpid02 (struct kvm_vcpu *vcpu)
 
static unsigned long nested_ept_get_eptp (struct kvm_vcpu *vcpu)
 
static bool nested_ept_ad_enabled (struct kvm_vcpu *vcpu)
 
static unsigned long nested_read_cr0 (struct vmcs12 *fields)
 
static unsigned long nested_read_cr4 (struct vmcs12 *fields)
 
static unsigned nested_cpu_vmx_misc_cr3_count (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_has_vmwrite_any_field (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_has_zero_length_injection (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_supports_monitor_trap_flag (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_has_vmx_shadow_vmcs (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_has (struct vmcs12 *vmcs12, u32 bit)
 
static bool nested_cpu_has2 (struct vmcs12 *vmcs12, u32 bit)
 
static bool nested_cpu_has_preemption_timer (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_nmi_exiting (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_virtual_nmis (struct vmcs12 *vmcs12)
 
static int nested_cpu_has_mtf (struct vmcs12 *vmcs12)
 
static int nested_cpu_has_ept (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_xsaves (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_pml (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_virt_x2apic_mode (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_vpid (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_apic_reg_virt (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_vid (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_posted_intr (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_vmfunc (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_eptp_switching (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_shadow_vmcs (struct vmcs12 *vmcs12)
 
static bool nested_cpu_has_save_preemption_timer (struct vmcs12 *vmcs12)
 
static bool nested_exit_on_nmi (struct kvm_vcpu *vcpu)
 
static bool nested_exit_on_intr (struct kvm_vcpu *vcpu)
 
static bool nested_cpu_has_encls_exit (struct vmcs12 *vmcs12)
 
static bool fixed_bits_valid (u64 val, u64 fixed0, u64 fixed1)
 
static bool nested_guest_cr0_valid (struct kvm_vcpu *vcpu, unsigned long val)
 
static bool nested_host_cr0_valid (struct kvm_vcpu *vcpu, unsigned long val)
 
static bool nested_cr4_valid (struct kvm_vcpu *vcpu, unsigned long val)
 

Variables

struct kvm_x86_nested_ops vmx_nested_ops
 

Macro Definition Documentation

◆ nested_guest_cr4_valid

#define nested_guest_cr4_valid   nested_cr4_valid

Definition at line 289 of file nested.h.

◆ nested_host_cr4_valid

#define nested_host_cr4_valid   nested_cr4_valid

Definition at line 290 of file nested.h.

Enumeration Type Documentation

◆ nvmx_vmentry_status

Enumerator
NVMX_VMENTRY_SUCCESS 
NVMX_VMENTRY_VMFAIL 
NVMX_VMENTRY_VMEXIT 
NVMX_VMENTRY_KVM_INTERNAL_ERROR 

Definition at line 13 of file nested.h.

 13 enum nvmx_vmentry_status {
14  NVMX_VMENTRY_SUCCESS, /* Entered VMX non-root mode */
15  NVMX_VMENTRY_VMFAIL, /* Consistency check VMFail */
16  NVMX_VMENTRY_VMEXIT, /* Consistency check VMExit */
17  NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
18 };
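
The status is consumed by the VMLAUNCH/VMRESUME emulation path. Below is a condensed sketch of how a caller branches on it, modeled on nested_vmx_run() in nested.c (surrounding error handling is trimmed, so treat it as illustrative rather than a verbatim excerpt):

enum nvmx_vmentry_status status;

status = nested_vmx_enter_non_root_mode(vcpu, true);
if (status == NVMX_VMENTRY_SUCCESS)
        return 1;                       /* now executing L2 */

vmx->nested.nested_run_pending = 0;
if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
        return 0;                       /* punt to userspace */
if (status == NVMX_VMENTRY_VMEXIT)
        return 1;                       /* VM-Exit to L1 was synthesized */

/* NVMX_VMENTRY_VMFAIL: report a VM-instruction error to L1. */
return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);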

Function Documentation

◆ fixed_bits_valid()

static bool fixed_bits_valid ( u64  val,
u64  fixed0,
u64  fixed1 
)
inline static

Definition at line 252 of file nested.h.

253 {
254  return ((val & fixed1) | fixed0) == val;
255 }
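In the VMX fixed-bit MSR pairs, fixed0 holds the bits that must be 1 and fixed1 holds the bits that are allowed to be 1. A small worked example with invented values:

/*
 * fixed0 = 0x1 (bit 0 must be 1), fixed1 = 0xf (bits above 3 must be 0):
 *   val = 0x05: ((0x05 & 0xf) | 0x1) == 0x05  -> valid
 *   val = 0x04: ((0x04 & 0xf) | 0x1) == 0x05  -> invalid (bit 0 clear)
 *   val = 0x15: ((0x15 & 0xf) | 0x1) == 0x05  -> invalid (bit 4 set)
 */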

◆ get_shadow_vmcs12()

static struct vmcs12 * get_shadow_vmcs12 ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 45 of file nested.h.

46 {
47  return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
48 }

◆ get_vmcs12()

static struct vmcs12 * get_vmcs12 ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 40 of file nested.h.

41 {
42  return to_vmx(vcpu)->nested.cached_vmcs12;
43 }

◆ get_vmx_mem_address()

int get_vmx_mem_address ( struct kvm_vcpu *  vcpu,
unsigned long  exit_qualification,
u32  vmx_instruction_info,
bool  wr,
int  len,
gva_t *  ret 
)

Definition at line 4963 of file nested.c.

4965 {
4966  gva_t off;
4967  bool exn;
4968  struct kvm_segment s;
4969 
4970  /*
4971  * According to Vol. 3B, "Information for VM Exits Due to Instruction
4972  * Execution", on an exit, vmx_instruction_info holds most of the
4973  * addressing components of the operand. Only the displacement part
4974  * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4975  * For how an actual address is calculated from all these components,
4976  * refer to Vol. 1, "Operand Addressing".
4977  */
4978  int scaling = vmx_instruction_info & 3;
4979  int addr_size = (vmx_instruction_info >> 7) & 7;
4980  bool is_reg = vmx_instruction_info & (1u << 10);
4981  int seg_reg = (vmx_instruction_info >> 15) & 7;
4982  int index_reg = (vmx_instruction_info >> 18) & 0xf;
4983  bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4984  int base_reg = (vmx_instruction_info >> 23) & 0xf;
4985  bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4986 
4987  if (is_reg) {
4988  kvm_queue_exception(vcpu, UD_VECTOR);
4989  return 1;
4990  }
4991 
4992  /* Addr = segment_base + offset */
4993  /* offset = base + [index * scale] + displacement */
4994  off = exit_qualification; /* holds the displacement */
4995  if (addr_size == 1)
4996  off = (gva_t)sign_extend64(off, 31);
4997  else if (addr_size == 0)
4998  off = (gva_t)sign_extend64(off, 15);
4999  if (base_is_valid)
5000  off += kvm_register_read(vcpu, base_reg);
5001  if (index_is_valid)
5002  off += kvm_register_read(vcpu, index_reg) << scaling;
5003  vmx_get_segment(vcpu, &s, seg_reg);
5004 
5005  /*
5006  * The effective address, i.e. @off, of a memory operand is truncated
5007  * based on the address size of the instruction. Note that this is
5008  * the *effective address*, i.e. the address prior to accounting for
5009  * the segment's base.
5010  */
5011  if (addr_size == 1) /* 32 bit */
5012  off &= 0xffffffff;
5013  else if (addr_size == 0) /* 16 bit */
5014  off &= 0xffff;
5015 
5016  /* Checks for #GP/#SS exceptions. */
5017  exn = false;
5018  if (is_long_mode(vcpu)) {
5019  /*
5020  * The virtual/linear address is never truncated in 64-bit
5021  * mode, e.g. a 32-bit address size can yield a 64-bit virtual
5022  * address when using FS/GS with a non-zero base.
5023  */
5024  if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
5025  *ret = s.base + off;
5026  else
5027  *ret = off;
5028 
5029  *ret = vmx_get_untagged_addr(vcpu, *ret, 0);
5030  /* Long mode: #GP(0)/#SS(0) if the memory address is in a
5031  * non-canonical form. This is the only check on the memory
5032  * destination for long mode!
5033  */
5034  exn = is_noncanonical_address(*ret, vcpu);
5035  } else {
5036  /*
5037  * When not in long mode, the virtual/linear address is
5038  * unconditionally truncated to 32 bits regardless of the
5039  * address size.
5040  */
5041  *ret = (s.base + off) & 0xffffffff;
5042 
5043  /* Protected mode: apply checks for segment validity in the
5044  * following order:
5045  * - segment type check (#GP(0) may be thrown)
5046  * - usability check (#GP(0)/#SS(0))
5047  * - limit check (#GP(0)/#SS(0))
5048  */
5049  if (wr)
5050  /* #GP(0) if the destination operand is located in a
5051  * read-only data segment or any code segment.
5052  */
5053  exn = ((s.type & 0xa) == 0 || (s.type & 8));
5054  else
5055  /* #GP(0) if the source operand is located in an
5056  * execute-only code segment
5057  */
5058  exn = ((s.type & 0xa) == 8);
5059  if (exn) {
5060  kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
5061  return 1;
5062  }
5063  /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
5064  */
5065  exn = (s.unusable != 0);
5066 
5067  /*
5068  * Protected mode: #GP(0)/#SS(0) if the memory operand is
5069  * outside the segment limit. All CPUs that support VMX ignore
5070  * limit checks for flat segments, i.e. segments with base==0,
5071  * limit==0xffffffff and of type expand-up data or code.
5072  */
5073  if (!(s.base == 0 && s.limit == 0xffffffff &&
5074  ((s.type & 8) || !(s.type & 4))))
5075  exn = exn || ((u64)off + len - 1 > s.limit);
5076  }
5077  if (exn) {
5078  kvm_queue_exception_e(vcpu,
5079  seg_reg == VCPU_SREG_SS ?
5080  SS_VECTOR : GP_VECTOR,
5081  0);
5082  return 1;
5083  }
5084 
5085  return 0;
5086 }
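
Callers are the VMX instruction handlers, which first decode the memory operand and then access it through the emulated MMU. A condensed sketch of the common pattern, modeled on the VMPTR decoding in nested.c (error handling trimmed):

gva_t gva;
gpa_t vmptr;
struct x86_exception e;

if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
                        vmcs_read32(VMX_INSTRUCTION_INFO), false,
                        sizeof(vmptr), &gva))
        return 1;       /* #UD/#GP/#SS already queued */

if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e))
        return kvm_handle_memory_failure(vcpu, 1, &e);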

◆ nested_cpu_has()

static bool nested_cpu_has ( struct vmcs12 *  vmcs12,
u32  bit 
)
inline static

Definition at line 132 of file nested.h.

133 {
134  return vmcs12->cpu_based_vm_exec_control & bit;
135 }

◆ nested_cpu_has2()

static bool nested_cpu_has2 ( struct vmcs12 *  vmcs12,
u32  bit 
)
inline static

Definition at line 137 of file nested.h.

 138 {
 139  return (vmcs12->cpu_based_vm_exec_control &
 140  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
 141  (vmcs12->secondary_vm_exec_control & bit);
 142 }
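
The check against CPU_BASED_ACTIVATE_SECONDARY_CONTROLS matters because the secondary controls take effect only when that primary bit is set; a secondary bit left set by L1 without it must behave as disabled. Typical use, as in the accessors below:

/* EPT is in effect for L2 only if L1 enabled both the primary
 * "activate secondary controls" bit and the secondary EPT bit. */
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT))
        /* ... set up nested EPT for L2 ... */;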

◆ nested_cpu_has_apic_reg_virt()

static bool nested_cpu_has_apic_reg_virt ( struct vmcs12 *  vmcs12)
inline static

Definition at line 190 of file nested.h.

191 {
192  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
193 }

◆ nested_cpu_has_encls_exit()

static bool nested_cpu_has_encls_exit ( struct vmcs12 *  vmcs12)
inline static

Definition at line 243 of file nested.h.

244 {
245  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
246 }

◆ nested_cpu_has_ept()

static int nested_cpu_has_ept ( struct vmcs12 *  vmcs12)
inline static

Definition at line 165 of file nested.h.

166 {
167  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
168 }

◆ nested_cpu_has_eptp_switching()

static bool nested_cpu_has_eptp_switching ( struct vmcs12 *  vmcs12)
inline static

Definition at line 210 of file nested.h.

 211 {
 212  return nested_cpu_has_vmfunc(vmcs12) &&
 213  (vmcs12->vm_function_control &
 214  VMX_VMFUNC_EPTP_SWITCHING);
 215 }

◆ nested_cpu_has_mtf()

static int nested_cpu_has_mtf ( struct vmcs12 *  vmcs12)
inline static

Definition at line 160 of file nested.h.

161 {
162  return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
163 }

◆ nested_cpu_has_nmi_exiting()

static bool nested_cpu_has_nmi_exiting ( struct vmcs12 *  vmcs12)
inline static

Definition at line 150 of file nested.h.

151 {
152  return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
153 }

◆ nested_cpu_has_pml()

static bool nested_cpu_has_pml ( struct vmcs12 *  vmcs12)
inline static

Definition at line 175 of file nested.h.

176 {
177  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
178 }

◆ nested_cpu_has_posted_intr()

static bool nested_cpu_has_posted_intr ( struct vmcs12 *  vmcs12)
inline static

Definition at line 200 of file nested.h.

201 {
202  return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
203 }

◆ nested_cpu_has_preemption_timer()

static bool nested_cpu_has_preemption_timer ( struct vmcs12 *  vmcs12)
inline static

Definition at line 144 of file nested.h.

 145 {
 146  return vmcs12->pin_based_vm_exec_control &
 147  PIN_BASED_VMX_PREEMPTION_TIMER;
 148 }

◆ nested_cpu_has_save_preemption_timer()

static bool nested_cpu_has_save_preemption_timer ( struct vmcs12 *  vmcs12)
inline static

Definition at line 222 of file nested.h.

223 {
224  return vmcs12->vm_exit_controls &
225  VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
226 }

◆ nested_cpu_has_shadow_vmcs()

static bool nested_cpu_has_shadow_vmcs ( struct vmcs12 *  vmcs12)
inline static

Definition at line 217 of file nested.h.

218 {
219  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
220 }

◆ nested_cpu_has_vid()

static bool nested_cpu_has_vid ( struct vmcs12 *  vmcs12)
inline static

Definition at line 195 of file nested.h.

196 {
197  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
198 }

◆ nested_cpu_has_virt_x2apic_mode()

static bool nested_cpu_has_virt_x2apic_mode ( struct vmcs12 *  vmcs12)
inline static

Definition at line 180 of file nested.h.

181 {
182  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
183 }

◆ nested_cpu_has_virtual_nmis()

static bool nested_cpu_has_virtual_nmis ( struct vmcs12 *  vmcs12)
inline static

Definition at line 155 of file nested.h.

156 {
157  return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
158 }

◆ nested_cpu_has_vmfunc()

static bool nested_cpu_has_vmfunc ( struct vmcs12 *  vmcs12)
inline static

Definition at line 205 of file nested.h.

206 {
207  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
208 }

◆ nested_cpu_has_vmwrite_any_field()

static bool nested_cpu_has_vmwrite_any_field ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 109 of file nested.h.

110 {
111  return to_vmx(vcpu)->nested.msrs.misc_low &
112  MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
113 }

◆ nested_cpu_has_vmx_shadow_vmcs()

static bool nested_cpu_has_vmx_shadow_vmcs ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 126 of file nested.h.

127 {
128  return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
129  SECONDARY_EXEC_SHADOW_VMCS;
130 }

◆ nested_cpu_has_vpid()

static bool nested_cpu_has_vpid ( struct vmcs12 *  vmcs12)
inline static

Definition at line 185 of file nested.h.

186 {
187  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
188 }

◆ nested_cpu_has_xsaves()

static bool nested_cpu_has_xsaves ( struct vmcs12 *  vmcs12)
inline static

Definition at line 170 of file nested.h.

171 {
172  return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
173 }

◆ nested_cpu_has_zero_length_injection()

static bool nested_cpu_has_zero_length_injection ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 115 of file nested.h.

116 {
117  return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
118 }

◆ nested_cpu_supports_monitor_trap_flag()

static bool nested_cpu_supports_monitor_trap_flag ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 120 of file nested.h.

121 {
122  return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
123  CPU_BASED_MONITOR_TRAP_FLAG;
124 }

◆ nested_cpu_vmx_misc_cr3_count()

static unsigned nested_cpu_vmx_misc_cr3_count ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 99 of file nested.h.

100 {
101  return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
102 }

◆ nested_cr4_valid()

static bool nested_cr4_valid ( struct kvm_vcpu *  vcpu,
unsigned long  val 
)
inline static

Definition at line 279 of file nested.h.

280 {
281  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
282  u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
283 
284  return fixed_bits_valid(val, fixed0, fixed1) &&
285  __kvm_is_valid_cr4(vcpu, val);
286 }

◆ nested_ept_ad_enabled()

static bool nested_ept_ad_enabled ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 77 of file nested.h.

78 {
79  return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
80 }

◆ nested_ept_get_eptp()

static unsigned long nested_ept_get_eptp ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 71 of file nested.h.

72 {
73  /* return the page table to be shadowed - in our case, EPT12 */
74  return get_vmcs12(vcpu)->ept_pointer;
75 }

◆ nested_exit_on_intr()

static bool nested_exit_on_intr ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 237 of file nested.h.

238 {
239  return get_vmcs12(vcpu)->pin_based_vm_exec_control &
240  PIN_BASED_EXT_INTR_MASK;
241 }

◆ nested_exit_on_nmi()

static bool nested_exit_on_nmi ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 228 of file nested.h.

 229 {
 230  return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
 231 }

◆ nested_get_vpid02()

static u16 nested_get_vpid02 ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 64 of file nested.h.

65 {
66  struct vcpu_vmx *vmx = to_vmx(vcpu);
67 
68  return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
69 }

◆ nested_guest_cr0_valid()

static bool nested_guest_cr0_valid ( struct kvm_vcpu *  vcpu,
unsigned long  val 
)
inline static

Definition at line 257 of file nested.h.

258 {
259  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
260  u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
261  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
262 
263  if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
264  SECONDARY_EXEC_UNRESTRICTED_GUEST &&
265  nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
266  fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
267 
268  return fixed_bits_valid(val, fixed0, fixed1);
269 }
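
The unrestricted-guest carve-out is what lets L1 run L2 in real mode or in unpaged protected mode. A worked illustration, assuming the commonly reported MSR value (an assumption for the example, not something this code reads):

/*
 * MSR_IA32_VMX_CR0_FIXED0 typically reports 0x80000021 (PG, NE, PE
 * must be 1). With SECONDARY_EXEC_UNRESTRICTED_GUEST both exposed to
 * L1 and enabled in vmcs12, fixed0 drops to 0x20 (only NE), so
 * val = 0x20 (real mode CR0) passes; without the feature it would
 * fail the PE/PG must-be-1 check.
 */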

◆ nested_host_cr0_valid()

static bool nested_host_cr0_valid ( struct kvm_vcpu *  vcpu,
unsigned long  val 
)
inline static

Definition at line 271 of file nested.h.

272 {
273  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
274  u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
275 
276  return fixed_bits_valid(val, fixed0, fixed1);
277 }

◆ nested_mark_vmcs12_pages_dirty()

void nested_mark_vmcs12_pages_dirty ( struct kvm_vcpu *  vcpu)

Definition at line 3838 of file nested.c.

3839 {
3840  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3841  gfn_t gfn;
3842 
3843  /*
3844  * Don't need to mark the APIC access page dirty; it is never
3845  * written to by the CPU during APIC virtualization.
3846  */
3847 
3848  if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3849  gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3850  kvm_vcpu_mark_page_dirty(vcpu, gfn);
3851  }
3852 
 3853  if (nested_cpu_has_posted_intr(vmcs12)) {
 3854  gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3855  kvm_vcpu_mark_page_dirty(vcpu, gfn);
3856  }
3857 }

◆ nested_read_cr0()

static unsigned long nested_read_cr0 ( struct vmcs12 *  fields)
inline static

Definition at line 88 of file nested.h.

89 {
90  return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
91  (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
92 }

◆ nested_read_cr4()

static unsigned long nested_read_cr4 ( struct vmcs12 *  fields)
inline static

Definition at line 93 of file nested.h.

94 {
95  return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
96  (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
97 }
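
Bits set in cr4_guest_host_mask are owned by L1: for those bits an L2 read is serviced from cr4_read_shadow, while the remaining bits come straight from guest_cr4; nested_read_cr0() above applies the same scheme to CR0. A concrete example, using CR0 for familiarity:

/*
 * If cr0_guest_host_mask = X86_CR0_TS, guest_cr0 has TS = 1 and
 * cr0_read_shadow has TS = 0, nested_read_cr0() reports TS = 0:
 * L2 observes the value L1 chose to expose, not the live bit.
 */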

◆ nested_sync_vmcs12_to_shadow()

void nested_sync_vmcs12_to_shadow ( struct kvm_vcpu *  vcpu)

Definition at line 2124 of file nested.c.

2125 {
2126  struct vcpu_vmx *vmx = to_vmx(vcpu);
2127 
 2128  if (nested_vmx_is_evmptr12_valid(vmx))
 2129  copy_vmcs12_to_enlightened(vmx);
 2130  else
2131  copy_vmcs12_to_shadow(vmx);
2132 
2133  vmx->nested.need_vmcs12_to_shadow_sync = false;
2134 }

◆ nested_vmx_check_io_bitmaps()

bool nested_vmx_check_io_bitmaps ( struct kvm_vcpu *  vcpu,
unsigned int  port,
int  size 
)

Definition at line 5963 of file nested.c.

5965 {
5966  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5967  gpa_t bitmap, last_bitmap;
5968  u8 b;
5969 
5970  last_bitmap = INVALID_GPA;
5971  b = -1;
5972 
5973  while (size > 0) {
5974  if (port < 0x8000)
5975  bitmap = vmcs12->io_bitmap_a;
5976  else if (port < 0x10000)
5977  bitmap = vmcs12->io_bitmap_b;
5978  else
5979  return true;
5980  bitmap += (port & 0x7fff) / 8;
5981 
5982  if (last_bitmap != bitmap)
5983  if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5984  return true;
5985  if (b & (1 << (port & 7)))
5986  return true;
5987 
5988  port++;
5989  size--;
5990  last_bitmap = bitmap;
5991  }
5992 
5993  return false;
5994 }
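
The two 4 KiB bitmaps cover ports 0x0000-0x7fff (bitmap A) and 0x8000-0xffff (bitmap B), one bit per port, and the access is intercepted if any bit covering it is set. A worked lookup:

/*
 * port = 0x3f8 (COM1), size = 1:
 *   port < 0x8000              -> bitmap = vmcs12->io_bitmap_a
 *   byte = (0x3f8 & 0x7fff) / 8 = 0x7f
 *   bit  = 0x3f8 & 7            = 0
 * The access is intercepted iff bit 0 of byte 0x7f is set.
 */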

◆ nested_vmx_enter_non_root_mode()

enum nvmx_vmentry_status nested_vmx_enter_non_root_mode ( struct kvm_vcpu *  vcpu,
bool  from_vmentry 
)

Definition at line 3414 of file nested.c.

3429 {
3430  struct vcpu_vmx *vmx = to_vmx(vcpu);
3431  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3432  enum vm_entry_failure_code entry_failure_code;
3433  bool evaluate_pending_interrupts;
3434  union vmx_exit_reason exit_reason = {
3435  .basic = EXIT_REASON_INVALID_STATE,
3436  .failed_vmentry = 1,
3437  };
3438  u32 failed_index;
3439 
3440  trace_kvm_nested_vmenter(kvm_rip_read(vcpu),
3441  vmx->nested.current_vmptr,
 3442  vmcs12->guest_rip,
 3443  vmcs12->guest_intr_status,
 3444  vmcs12->vm_entry_intr_info_field,
 3445  vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT,
 3446  vmcs12->ept_pointer,
 3447  vmcs12->guest_cr3,
3448  KVM_ISA_VMX);
3449 
 3450  kvm_service_local_tlb_flush_requests(vcpu);
 3451 
3452  evaluate_pending_interrupts = exec_controls_get(vmx) &
3453  (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3454  if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3455  evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3456  if (!evaluate_pending_interrupts)
3457  evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu);
3458 
3459  if (!vmx->nested.nested_run_pending ||
3460  !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3461  vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3462  if (kvm_mpx_supported() &&
3463  (!vmx->nested.nested_run_pending ||
3464  !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3465  vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3466 
3467  /*
3468  * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3469  * nested early checks are disabled. In the event of a "late" VM-Fail,
3470  * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3471  * software model to the pre-VMEntry host state. When EPT is disabled,
3472  * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3473  * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3474  * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3475  * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3476  * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3477  * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3478  * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3479  * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3480  * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3481  * path would need to manually save/restore vmcs01.GUEST_CR3.
3482  */
3483  if (!enable_ept && !nested_early_check)
3484  vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3485 
3486  vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3487 
3488  prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3489 
3490  if (from_vmentry) {
3491  if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3492  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 3493  return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 3494  }
3495 
3496  if (nested_vmx_check_vmentry_hw(vcpu)) {
3497  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3498  return NVMX_VMENTRY_VMFAIL;
3499  }
3500 
 3501  if (nested_vmx_check_guest_state(vcpu, vmcs12,
 3502  &entry_failure_code)) {
3503  exit_reason.basic = EXIT_REASON_INVALID_STATE;
3504  vmcs12->exit_qualification = entry_failure_code;
3505  goto vmentry_fail_vmexit;
3506  }
3507  }
3508 
3509  enter_guest_mode(vcpu);
3510 
3511  if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3512  exit_reason.basic = EXIT_REASON_INVALID_STATE;
3513  vmcs12->exit_qualification = entry_failure_code;
3514  goto vmentry_fail_vmexit_guest_mode;
3515  }
3516 
3517  if (from_vmentry) {
3518  failed_index = nested_vmx_load_msr(vcpu,
 3519  vmcs12->vm_entry_msr_load_addr,
 3520  vmcs12->vm_entry_msr_load_count);
 3521  if (failed_index) {
3522  exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3523  vmcs12->exit_qualification = failed_index;
3524  goto vmentry_fail_vmexit_guest_mode;
3525  }
3526  } else {
3527  /*
3528  * The MMU is not initialized to point at the right entities yet and
3529  * "get pages" would need to read data from the guest (i.e. we will
3530  * need to perform gpa to hpa translation). Request a call
3531  * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3532  * have already been set at vmentry time and should not be reset.
3533  */
3534  kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3535  }
3536 
3537  /*
3538  * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
3539  * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
3540  * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
3541  * unconditionally.
3542  */
3543  if (unlikely(evaluate_pending_interrupts))
3544  kvm_make_request(KVM_REQ_EVENT, vcpu);
3545 
3546  /*
3547  * Do not start the preemption timer hrtimer until after we know
3548  * we are successful, so that only nested_vmx_vmexit needs to cancel
3549  * the timer.
3550  */
3551  vmx->nested.preemption_timer_expired = false;
 3552  if (nested_cpu_has_preemption_timer(vmcs12)) {
 3553  u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3554  vmx_start_preemption_timer(vcpu, timer_value);
3555  }
3556 
3557  /*
3558  * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3559  * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3560  * returned as far as L1 is concerned. It will only return (and set
3561  * the success flag) when L2 exits (see nested_vmx_vmexit()).
3562  */
3563  return NVMX_VMENTRY_SUCCESS;
3564 
3565  /*
3566  * A failed consistency check that leads to a VMExit during L1's
3567  * VMEnter to L2 is a variation of a normal VMexit, as explained in
3568  * 26.7 "VM-entry failures during or after loading guest state".
3569  */
3570 vmentry_fail_vmexit_guest_mode:
3571  if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3572  vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3573  leave_guest_mode(vcpu);
3574 
3575 vmentry_fail_vmexit:
3576  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3577 
3578  if (!from_vmentry)
3579  return NVMX_VMENTRY_VMEXIT;
3580 
 3581  load_vmcs12_host_state(vcpu, vmcs12);
 3582  vmcs12->vm_exit_reason = exit_reason.full;
 3583  if (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx))
 3584  vmx->nested.need_vmcs12_to_shadow_sync = true;
3585  return NVMX_VMENTRY_VMEXIT;
3586 }

◆ nested_vmx_free_vcpu()

void nested_vmx_free_vcpu ( struct kvm_vcpu *  vcpu)

Definition at line 372 of file nested.c.

373 {
374  vcpu_load(vcpu);
375  vmx_leave_nested(vcpu);
376  vcpu_put(vcpu);
377 }

◆ nested_vmx_hardware_setup()

__init int nested_vmx_hardware_setup ( int(*[])(struct kvm_vcpu *)  exit_handlers)

Definition at line 7092 of file nested.c.

7093 {
7094  int i;
7095 
7096  if (!cpu_has_vmx_shadow_vmcs())
7097  enable_shadow_vmcs = 0;
7098  if (enable_shadow_vmcs) {
7099  for (i = 0; i < VMX_BITMAP_NR; i++) {
7100  /*
7101  * The vmx_bitmap is not tied to a VM and so should
7102  * not be charged to a memcg.
7103  */
7104  vmx_bitmap[i] = (unsigned long *)
7105  __get_free_page(GFP_KERNEL);
7106  if (!vmx_bitmap[i]) {
 7107  nested_vmx_hardware_unsetup();
 7108  return -ENOMEM;
7109  }
7110  }
7111 
 7112  init_vmcs_shadow_fields();
 7113  }
7114 
7115  exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
7116  exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
7117  exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
7118  exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
7119  exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
7120  exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
7121  exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
7122  exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff;
7123  exit_handlers[EXIT_REASON_VMON] = handle_vmxon;
7124  exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
7125  exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
7126  exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
7127 
7128  return 0;
7129 }

◆ nested_vmx_hardware_unsetup()

void nested_vmx_hardware_unsetup ( void  )

Definition at line 7082 of file nested.c.

7083 {
7084  int i;
7085 
7086  if (enable_shadow_vmcs) {
7087  for (i = 0; i < VMX_BITMAP_NR; i++)
7088  free_page((unsigned long)vmx_bitmap[i]);
7089  }
7090 }

◆ nested_vmx_reflect_vmexit()

bool nested_vmx_reflect_vmexit ( struct kvm_vcpu *  vcpu)

Definition at line 6393 of file nested.c.

6394 {
6395  struct vcpu_vmx *vmx = to_vmx(vcpu);
6396  union vmx_exit_reason exit_reason = vmx->exit_reason;
6397  unsigned long exit_qual;
6398  u32 exit_intr_info;
6399 
6400  WARN_ON_ONCE(vmx->nested.nested_run_pending);
6401 
6402  /*
6403  * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6404  * has already loaded L2's state.
6405  */
6406  if (unlikely(vmx->fail)) {
6407  trace_kvm_nested_vmenter_failed(
6408  "hardware VM-instruction error: ",
6409  vmcs_read32(VM_INSTRUCTION_ERROR));
6410  exit_intr_info = 0;
6411  exit_qual = 0;
6412  goto reflect_vmexit;
6413  }
6414 
6415  trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
6416 
6417  /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6418  if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6419  return false;
6420 
6421  /* If L1 doesn't want the exit, handle it in L0. */
6422  if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6423  return false;
6424 
6425  /*
6426  * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6427  * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6428  * need to be synthesized by querying the in-kernel LAPIC, but external
6429  * interrupts are never reflected to L1 so it's a non-issue.
6430  */
6431  exit_intr_info = vmx_get_intr_info(vcpu);
6432  if (is_exception_with_error_code(exit_intr_info)) {
6433  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6434 
 6435  vmcs12->vm_exit_intr_error_code =
 6436  vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6437  }
6438  exit_qual = vmx_get_exit_qual(vcpu);
6439 
6440 reflect_vmexit:
6441  nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6442  return true;
6443 }

◆ nested_vmx_set_vmcs_shadowing_bitmap()

void nested_vmx_set_vmcs_shadowing_bitmap ( void  )

Definition at line 6764 of file nested.c.

6765 {
6766  if (enable_shadow_vmcs) {
6767  vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6768  vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6769  }
6770 }

◆ nested_vmx_setup_ctls_msrs()

void nested_vmx_setup_ctls_msrs ( struct vmcs_config vmcs_conf,
u32  ept_caps 
)

Definition at line 7045 of file nested.c.

7046 {
7047  struct nested_vmx_msrs *msrs = &vmcs_conf->nested;
7048 
7049  /*
7050  * Note that as a general rule, the high half of the MSRs (bits in
7051  * the control fields which may be 1) should be initialized by the
7052  * intersection of the underlying hardware's MSR (i.e., features which
7053  * can be supported) and the list of features we want to expose -
7054  * because they are known to be properly supported in our code.
7055  * Also, usually, the low half of the MSRs (bits which must be 1) can
7056  * be set to 0, meaning that L1 may turn off any of these bits. The
7057  * reason is that if one of these bits is necessary, it will appear
7058  * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
7059  * fields of vmcs01 and vmcs02, will turn these bits off - and
7060  * nested_vmx_l1_wants_exit() will not pass related exits to L1.
7061  * These rules have exceptions below.
7062  */
7063  nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs);
7064 
7065  nested_vmx_setup_exit_ctls(vmcs_conf, msrs);
7066 
7067  nested_vmx_setup_entry_ctls(vmcs_conf, msrs);
7068 
7069  nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs);
7070 
7071  nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs);
7072 
7073  nested_vmx_setup_misc_data(vmcs_conf, msrs);
7074 
7075  nested_vmx_setup_basic(msrs);
7076 
 7077  nested_vmx_setup_cr_fixed(msrs);
 7078 
 7079  msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
 7080 }

◆ nested_vmx_vmexit()

void nested_vmx_vmexit ( struct kvm_vcpu *  vcpu,
u32  vm_exit_reason,
u32  exit_intr_info,
unsigned long  exit_qualification 
)

Definition at line 4767 of file nested.c.

4769 {
4770  struct vcpu_vmx *vmx = to_vmx(vcpu);
4771  struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4772 
4773  /* Pending MTF traps are discarded on VM-Exit. */
4774  vmx->nested.mtf_pending = false;
4775 
4776  /* trying to cancel vmlaunch/vmresume is a bug */
4777  WARN_ON_ONCE(vmx->nested.nested_run_pending);
4778 
4779 #ifdef CONFIG_KVM_HYPERV
4780  if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4781  /*
4782  * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4783  * Enlightened VMCS after migration and we still need to
4784  * do that when something is forcing L2->L1 exit prior to
4785  * the first L2 run.
4786  */
4787  (void)nested_get_evmcs_page(vcpu);
4788  }
4789 #endif
4790 
4791  /* Service pending TLB flush requests for L2 before switching to L1. */
 4792  kvm_service_local_tlb_flush_requests(vcpu);
 4793 
4794  /*
4795  * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4796  * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4797  * up-to-date before switching to L1.
4798  */
4799  if (enable_ept && is_pae_paging(vcpu))
4800  vmx_ept_load_pdptrs(vcpu);
4801 
4802  leave_guest_mode(vcpu);
4803 
 4804  if (nested_cpu_has_preemption_timer(vmcs12))
 4805  hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4806 
4807  if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4808  vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4809  if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4810  vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4811  }
4812 
4813  if (likely(!vmx->fail)) {
 4814  sync_vmcs02_to_vmcs12(vcpu, vmcs12);
 4815 
4816  if (vm_exit_reason != -1)
 4817  prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
 4818  exit_intr_info, exit_qualification);
4819 
4820  /*
4821  * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4822  * also be used to capture vmcs12 cache as part of
4823  * capturing nVMX state for snapshot (migration).
4824  *
4825  * Otherwise, this flush will dirty guest memory at a
4826  * point it is already assumed by user-space to be
4827  * immutable.
4828  */
 4829  nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
 4830  } else {
4831  /*
4832  * The only expected VM-instruction error is "VM entry with
4833  * invalid control field(s)." Anything else indicates a
4834  * problem with L0. And we should never get here with a
4835  * VMFail of any type if early consistency checks are enabled.
4836  */
4837  WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4838  VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4839  WARN_ON_ONCE(nested_early_check);
4840  }
4841 
4842  /*
4843  * Drop events/exceptions that were queued for re-injection to L2
4844  * (picked up via vmx_complete_interrupts()), as well as exceptions
4845  * that were pending for L2. Note, this must NOT be hoisted above
4846  * prepare_vmcs12(), events/exceptions queued for re-injection need to
4847  * be captured in vmcs12 (see vmcs12_save_pending_event()).
4848  */
4849  vcpu->arch.nmi_injected = false;
 4850  kvm_clear_exception_queue(vcpu);
 4851  kvm_clear_interrupt_queue(vcpu);
 4852 
4853  vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4854 
4855  /*
4856  * If IBRS is advertised to the vCPU, KVM must flush the indirect
4857  * branch predictors when transitioning from L2 to L1, as L1 expects
4858  * hardware (KVM in this case) to provide separate predictor modes.
4859  * Bare metal isolates VMX root (host) from VMX non-root (guest), but
4860  * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
4861  * separate modes for L2 vs L1.
4862  */
4863  if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4864  indirect_branch_prediction_barrier();
4865 
4866  /* Update any VMCS fields that might have changed while L2 ran */
4867  vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4868  vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4869  vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 4870  if (kvm_caps.has_tsc_control)
 4871  vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4872 
4873  if (vmx->nested.l1_tpr_threshold != -1)
4874  vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4875 
 4876  if (vmx->nested.change_vmcs01_virtual_apic_mode) {
 4877  vmx->nested.change_vmcs01_virtual_apic_mode = false;
 4878  vmx_set_virtual_apic_mode(vcpu);
 4879  }
4880 
 4881  if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
 4882  vmx->nested.update_vmcs01_cpu_dirty_logging = false;
 4883  vmx_update_cpu_dirty_logging(vcpu);
 4884  }
4885 
4886  /* Unpin physical memory we referred to in vmcs02 */
4887  kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
4888  kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4889  kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4890  vmx->nested.pi_desc = NULL;
4891 
 4892  if (vmx->nested.reload_vmcs01_apic_access_page) {
 4893  vmx->nested.reload_vmcs01_apic_access_page = false;
 4894  kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4895  }
4896 
 4897  if (vmx->nested.update_vmcs01_apicv_status) {
 4898  vmx->nested.update_vmcs01_apicv_status = false;
4899  kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4900  }
4901 
4902  if ((vm_exit_reason != -1) &&
 4903  (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)))
 4904  vmx->nested.need_vmcs12_to_shadow_sync = true;
4905 
4906  /* in case we halted in L2 */
4907  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4908 
4909  if (likely(!vmx->fail)) {
4910  if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4911  nested_exit_intr_ack_set(vcpu)) {
4912  int irq = kvm_cpu_get_interrupt(vcpu);
4913  WARN_ON(irq < 0);
4914  vmcs12->vm_exit_intr_info = irq |
4915  INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4916  }
4917 
4918  if (vm_exit_reason != -1)
4919  trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
 4920  vmcs12->exit_qualification,
 4921  vmcs12->idt_vectoring_info_field,
 4922  vmcs12->vm_exit_intr_info,
 4923  vmcs12->vm_exit_intr_error_code,
 4924  KVM_ISA_VMX);
4925 
 4926  load_vmcs12_host_state(vcpu, vmcs12);
 4927 
4928  return;
4929  }
4930 
4931  /*
4932  * After an early L2 VM-entry failure, we're now back
4933  * in L1 which thinks it just finished a VMLAUNCH or
4934  * VMRESUME instruction, so we need to set the failure
4935  * flag and the VM-instruction error field of the VMCS
4936  * accordingly, and skip the emulated instruction.
4937  */
4938  (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4939 
4940  /*
4941  * Restore L1's host state to KVM's software model. We're here
4942  * because a consistency check was caught by hardware, which
4943  * means some amount of guest state has been propagated to KVM's
4944  * model and needs to be unwound to the host's state.
4945  */
 4946  nested_vmx_restore_host_state(vcpu);
 4947 
4948  vmx->fail = 0;
4949 }

◆ vmx_get_vmx_msr()

int vmx_get_vmx_msr ( struct nested_vmx_msrs *  msrs,
u32  msr_index,
u64 *  pdata 
)

Definition at line 1458 of file nested.c.

1459 {
1460  switch (msr_index) {
1461  case MSR_IA32_VMX_BASIC:
1462  *pdata = msrs->basic;
1463  break;
1464  case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1465  case MSR_IA32_VMX_PINBASED_CTLS:
1466  *pdata = vmx_control_msr(
1467  msrs->pinbased_ctls_low,
1468  msrs->pinbased_ctls_high);
1469  if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1470  *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1471  break;
1472  case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1473  case MSR_IA32_VMX_PROCBASED_CTLS:
1474  *pdata = vmx_control_msr(
1475  msrs->procbased_ctls_low,
1476  msrs->procbased_ctls_high);
1477  if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1478  *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1479  break;
1480  case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1481  case MSR_IA32_VMX_EXIT_CTLS:
1482  *pdata = vmx_control_msr(
1483  msrs->exit_ctls_low,
1484  msrs->exit_ctls_high);
1485  if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1486  *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1487  break;
1488  case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1489  case MSR_IA32_VMX_ENTRY_CTLS:
1490  *pdata = vmx_control_msr(
1491  msrs->entry_ctls_low,
1492  msrs->entry_ctls_high);
1493  if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1494  *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1495  break;
1496  case MSR_IA32_VMX_MISC:
1497  *pdata = vmx_control_msr(
1498  msrs->misc_low,
1499  msrs->misc_high);
1500  break;
1501  case MSR_IA32_VMX_CR0_FIXED0:
1502  *pdata = msrs->cr0_fixed0;
1503  break;
1504  case MSR_IA32_VMX_CR0_FIXED1:
1505  *pdata = msrs->cr0_fixed1;
1506  break;
1507  case MSR_IA32_VMX_CR4_FIXED0:
1508  *pdata = msrs->cr4_fixed0;
1509  break;
1510  case MSR_IA32_VMX_CR4_FIXED1:
1511  *pdata = msrs->cr4_fixed1;
1512  break;
1513  case MSR_IA32_VMX_VMCS_ENUM:
1514  *pdata = msrs->vmcs_enum;
1515  break;
1516  case MSR_IA32_VMX_PROCBASED_CTLS2:
1517  *pdata = vmx_control_msr(
1518  msrs->secondary_ctls_low,
1519  msrs->secondary_ctls_high);
1520  break;
1521  case MSR_IA32_VMX_EPT_VPID_CAP:
1522  *pdata = msrs->ept_caps |
1523  ((u64)msrs->vpid_caps << 32);
1524  break;
1525  case MSR_IA32_VMX_VMFUNC:
1526  *pdata = msrs->vmfunc_controls;
1527  break;
1528  default:
1529  return 1;
1530  }
1531 
1532  return 0;
1533 }
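
The vmx_control_msr() helper used above (nested.c line 215) packs the two halves in the layout the SDM defines for the VMX control capability MSRs: bits 31:0 are the allowed-0 settings (a set bit means the control must be 1) and bits 63:32 are the allowed-1 settings (a set bit means the control may be 1):

static u64 vmx_control_msr(u32 low, u32 high)
{
        return low | ((u64)high << 32);
}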

◆ vmx_has_valid_vmcs12()

static int vmx_has_valid_vmcs12 ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 55 of file nested.h.

56 {
57  struct vcpu_vmx *vmx = to_vmx(vcpu);
58 
59  /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
60  return vmx->nested.current_vmptr != -1ull ||
 61  nested_vmx_is_evmptr12_set(vmx);
 62 }

◆ vmx_leave_nested()

void vmx_leave_nested ( struct kvm_vcpu *  vcpu)

Definition at line 6568 of file nested.c.

6569 {
6570  if (is_guest_mode(vcpu)) {
6571  to_vmx(vcpu)->nested.nested_run_pending = 0;
6572  nested_vmx_vmexit(vcpu, -1, 0, 0);
6573  }
6574  free_nested(vcpu);
6575 }

◆ vmx_set_vmx_msr()

int vmx_set_vmx_msr ( struct kvm_vcpu *  vcpu,
u32  msr_index,
u64  data 
)

Definition at line 1393 of file nested.c.

1394 {
1395  struct vcpu_vmx *vmx = to_vmx(vcpu);
1396 
1397  /*
1398  * Don't allow changes to the VMX capability MSRs while the vCPU
1399  * is in VMX operation.
1400  */
1401  if (vmx->nested.vmxon)
1402  return -EBUSY;
1403 
1404  switch (msr_index) {
1405  case MSR_IA32_VMX_BASIC:
1406  return vmx_restore_vmx_basic(vmx, data);
1407  case MSR_IA32_VMX_PINBASED_CTLS:
1408  case MSR_IA32_VMX_PROCBASED_CTLS:
1409  case MSR_IA32_VMX_EXIT_CTLS:
1410  case MSR_IA32_VMX_ENTRY_CTLS:
1411  /*
1412  * The "non-true" VMX capability MSRs are generated from the
1413  * "true" MSRs, so we do not support restoring them directly.
1414  *
1415  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1416  * should restore the "true" MSRs with the must-be-1 bits
1417  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1418  * DEFAULT SETTINGS".
1419  */
1420  return -EINVAL;
1421  case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1422  case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1423  case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1424  case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1425  case MSR_IA32_VMX_PROCBASED_CTLS2:
1426  return vmx_restore_control_msr(vmx, msr_index, data);
1427  case MSR_IA32_VMX_MISC:
1428  return vmx_restore_vmx_misc(vmx, data);
1429  case MSR_IA32_VMX_CR0_FIXED0:
1430  case MSR_IA32_VMX_CR4_FIXED0:
1431  return vmx_restore_fixed0_msr(vmx, msr_index, data);
1432  case MSR_IA32_VMX_CR0_FIXED1:
1433  case MSR_IA32_VMX_CR4_FIXED1:
1434  /*
1435  * These MSRs are generated based on the vCPU's CPUID, so we
1436  * do not support restoring them directly.
1437  */
1438  return -EINVAL;
1439  case MSR_IA32_VMX_EPT_VPID_CAP:
1440  return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1441  case MSR_IA32_VMX_VMCS_ENUM:
1442  vmx->nested.msrs.vmcs_enum = data;
1443  return 0;
1444  case MSR_IA32_VMX_VMFUNC:
1445  if (data & ~vmcs_config.nested.vmfunc_controls)
1446  return -EINVAL;
1447  vmx->nested.msrs.vmfunc_controls = data;
1448  return 0;
1449  default:
1450  /*
1451  * The rest of the VMX capability MSRs do not support restore.
1452  */
1453  return -EINVAL;
1454  }
1455 }

Variable Documentation

◆ vmx_nested_ops

struct kvm_x86_nested_ops vmx_nested_ops
extern

Definition at line 7092 of file nested.c.