/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
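
/*
 * x2APIC exposes each xAPIC register at APIC_BASE_MSR (0x800) plus the
 * MMIO offset shifted right by 4, e.g. the TPR at xAPIC offset 0x80 maps
 * to MSR 0x808.
 */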

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

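/*
 * Backing store for the VMCS VM-Entry/VM-Exit MSR-load and MSR-store lists
 * (see msr_autoload/msr_autostore below); each list is capped at
 * MAX_NR_LOADSTORE_MSRS entries.
 */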
struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

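/*
 * Snapshot of the Intel PT MSR state (RTIT_CTL, RTIT_STATUS, the OUTPUT and
 * address-range MSRs) for one context; pt_desc below keeps one copy for the
 * host and one for the guest so they can be swapped around VM-Entry/VM-Exit.
 */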
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

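/*
 * Decodes the 32-bit VM-Exit reason field: bits 15:0 hold the basic exit
 * reason (EXIT_REASON_*), the upper bits are modifiers, e.g. failed_vmentry
 * (bit 31) marks a failed VM-Entry rather than a true VM-Exit.
 */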
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct kvm_host_map apic_access_page_map;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/*
	 * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
	 * order to propagate the guest's pre-VM-Enter value into vmcs02. For
	 * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
	 * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
	 * userspace restores MSRs before nested state. If userspace restores
	 * MSRs after nested state, the snapshot holds garbage, but KVM can't
	 * detect that, and the garbage value in vmcs02 will be overwritten by
	 * MSR restoration in any case.
	 */
	u64 pre_vmenter_debugctl;
	u64 pre_vmenter_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

#ifdef CONFIG_KVM_HYPERV
	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
#endif
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8                    x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool                  guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif

	u64                   spec_ctrl;
	u32                   msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	16
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
	/* Posted Interrupt Descriptor (PID) table for IPI virtualization */
	u64 *pid_table;
};

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
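
/*
 * Convenience wrapper used, e.g., by pt_update_intercept_for_msr() to flip
 * interception of an MSR on or off from a single boolean, typically with
 * MSR_TYPE_RW to cover both reads and writes.
 */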

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr)                \
{                                                                              \
	int f = sizeof(unsigned long);                                         \
									       \
	if (msr <= 0x1fff)                                                     \
		return bitop##_bit(msr, bitmap + base / f);                    \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))                   \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;                                                    \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)                  \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
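
/*
 * The expansions above yield vmx_{test,clear,set}_msr_bitmap_{read,write}();
 * a set bit means the corresponding MSR access is intercepted and VM-Exits.
 */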

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
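
/* The high byte of GUEST_INTR_STATUS holds SVI, the vector in service. */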

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |			\
		 VM_ENTRY_IA32E_MODE)
#else
	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS				\
	(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_ENTRY_LOAD_IA32_PAT |					\
	 VM_ENTRY_LOAD_IA32_EFER |					\
	 VM_ENTRY_LOAD_BNDCFGS |					\
	 VM_ENTRY_PT_CONCEAL_PIP |					\
	 VM_ENTRY_LOAD_IA32_RTIT_CTL)

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_SAVE_DEBUG_CONTROLS |					\
	 VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		(__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |			\
		 VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
		__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS				\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_IA32_PAT |					\
	 VM_EXIT_LOAD_IA32_PAT |					\
	 VM_EXIT_SAVE_IA32_EFER |					\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |				\
	 VM_EXIT_LOAD_IA32_EFER |					\
	 VM_EXIT_CLEAR_BNDCFGS |					\
	 VM_EXIT_PT_CONCEAL_PIP |					\
	 VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_EXT_INTR_MASK |					\
	 PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL			\
	(PIN_BASED_VIRTUAL_NMIS |					\
	 PIN_BASED_POSTED_INTR |					\
	 PIN_BASED_VMX_PREEMPTION_TIMER)

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_HLT_EXITING |					\
	 CPU_BASED_CR3_LOAD_EXITING |					\
	 CPU_BASED_CR3_STORE_EXITING |					\
	 CPU_BASED_UNCOND_IO_EXITING |					\
	 CPU_BASED_MOV_DR_EXITING |					\
	 CPU_BASED_USE_TSC_OFFSETTING |					\
	 CPU_BASED_MWAIT_EXITING |					\
	 CPU_BASED_MONITOR_EXITING |					\
	 CPU_BASED_INVLPG_EXITING |					\
	 CPU_BASED_RDPMC_EXITING |					\
	 CPU_BASED_INTR_WINDOW_EXITING)

#ifdef CONFIG_X86_64
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		(__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |		\
		 CPU_BASED_CR8_LOAD_EXITING |				\
		 CPU_BASED_CR8_STORE_EXITING)
#else
	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
		__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL			\
	(CPU_BASED_RDTSC_EXITING |					\
	 CPU_BASED_TPR_SHADOW |						\
	 CPU_BASED_USE_IO_BITMAPS |					\
	 CPU_BASED_MONITOR_TRAP_FLAG |					\
	 CPU_BASED_USE_MSR_BITMAPS |					\
	 CPU_BASED_NMI_WINDOW_EXITING |					\
	 CPU_BASED_PAUSE_EXITING |					\
	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |			\
	 CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL			\
	(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |			\
	 SECONDARY_EXEC_WBINVD_EXITING |				\
	 SECONDARY_EXEC_ENABLE_VPID |					\
	 SECONDARY_EXEC_ENABLE_EPT |					\
	 SECONDARY_EXEC_UNRESTRICTED_GUEST |				\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING |				\
	 SECONDARY_EXEC_DESC |						\
	 SECONDARY_EXEC_ENABLE_RDTSCP |					\
	 SECONDARY_EXEC_ENABLE_INVPCID |				\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_ENABLE_XSAVES |					\
	 SECONDARY_EXEC_RDSEED_EXITING |				\
	 SECONDARY_EXEC_RDRAND_EXITING |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |				\
	 SECONDARY_EXEC_PT_USE_GPA |					\
	 SECONDARY_EXEC_PT_CONCEAL_VMX |				\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
	 SECONDARY_EXEC_ENCLS_EXITING)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
	(TERTIARY_EXEC_IPI_VIRT)

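/*
 * The REQUIRED/OPTIONAL pairs above feed the BUILD_BUG_ON()s in the setbit
 * and clearbit helpers below: toggling a control bit that KVM neither
 * requires nor optionally uses for a given field is a build error.
 */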
#define BUILD_CONTROLS_SHADOW(lname, uname, bits)				\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)	\
{										\
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {			\
		vmcs_write##bits(uname, val);					\
		vmx->loaded_vmcs->controls_shadow.lname = val;			\
	}									\
}										\
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)	\
{										\
	return vmcs->controls_shadow.lname;					\
}										\
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)		\
{										\
	return __##lname##_controls_get(vmx->loaded_vmcs);			\
}										\
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
{										\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);		\
}										\
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{										\
	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);		\
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
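
/*
 * For example, BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
 * expands to pin_controls_set/get() and pin_controls_setbit/clearbit(),
 * which shadow the VMCS field in loaded_vmcs to elide redundant VMWRITEs.
 */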

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand. Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				(1 << VCPU_REGS_RSP) |		\
				(1 << VCPU_EXREG_RFLAGS) |	\
				(1 << VCPU_EXREG_PDPTR) |	\
				(1 << VCPU_EXREG_SEGMENTS) |	\
				(1 << VCPU_EXREG_CR0) |		\
				(1 << VCPU_EXREG_CR3) |		\
				(1 << VCPU_EXREG_CR4) |		\
				(1 << VCPU_EXREG_EXIT_INFO_1) |	\
				(1 << VCPU_EXREG_EXIT_INFO_2))

static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
	unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;

	/*
	 * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
	 * in order to construct shadow PTEs with the correct protections.
	 * Note! CR0.WP technically can be passed through to the guest if
	 * paging is disabled, but checking CR0.PG would generate a cyclical
	 * dependency of sorts due to forcing the caller to ensure CR0 holds
	 * the correct value prior to determining which CR0 bits can be owned
	 * by L1. Keep it simple and limit the optimization to EPT.
	 */
	if (!enable_ept)
		bits &= ~X86_CR0_WP;
	return bits;
}

static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
{
	return &to_vmx(vcpu)->lbr_desc;
}

static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
{
	return &vcpu_to_lbr_desc(vcpu)->records;
}

static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu_to_lbr_records(vcpu)->nr;
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

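/*
 * Exit qualification and interrupt info are VMREAD on first use after a
 * VM-Exit and then served from the register cache (see
 * VMX_REGS_LAZY_LOAD_SET), so repeat readers skip the VMREAD.
 */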
static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	return vmx->exit_qualification;
}

static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && enable_ipiv;
}

#endif /* __KVM_X86_VMX_H */
Definition: xen.c:1