KVM
x86.h File Reference
#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
Include dependency graph for x86.h:
This graph shows which files directly or indirectly include this file:


Classes

struct  kvm_caps
 

Macros

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)
 
#define KVM_FIRST_EMULATED_VMX_MSR   MSR_IA32_VMX_BASIC
 
#define KVM_LAST_EMULATED_VMX_MSR   MSR_IA32_VMX_VMFUNC
 
#define KVM_DEFAULT_PLE_GAP   128
 
#define KVM_VMX_DEFAULT_PLE_WINDOW   4096
 
#define KVM_DEFAULT_PLE_WINDOW_GROW   2
 
#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0
 
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX   UINT_MAX
 
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX   USHRT_MAX
 
#define KVM_SVM_DEFAULT_PLE_WINDOW   3000
 
#define MSR_IA32_CR_PAT_DEFAULT   0x0007040600070406ULL
 
#define MMIO_GVA_ANY   (~(gva_t)0)
 
#define do_shl32_div32(n, base)
 
#define KVM_MSR_RET_INVALID   2 /* in-kernel MSR emulation #GP condition */
 
#define KVM_MSR_RET_FILTERED   3 /* #GP due to userspace MSR filter */
 
#define __cr4_reserved_bits(__cpu_has, __c)
 

Enumerations

enum  kvm_intr_type { KVM_HANDLING_IRQ = 1 , KVM_HANDLING_NMI }
 

Functions

void kvm_spurious_fault (void)
 
static unsigned int __grow_ple_window (unsigned int val, unsigned int base, unsigned int modifier, unsigned int max)
 
static unsigned int __shrink_ple_window (unsigned int val, unsigned int base, unsigned int modifier, unsigned int min)
 
void kvm_service_local_tlb_flush_requests (struct kvm_vcpu *vcpu)
 
int kvm_check_nested_events (struct kvm_vcpu *vcpu)
 
static bool kvm_vcpu_has_run (struct kvm_vcpu *vcpu)
 
static bool kvm_is_exception_pending (struct kvm_vcpu *vcpu)
 
static void kvm_clear_exception_queue (struct kvm_vcpu *vcpu)
 
static void kvm_queue_interrupt (struct kvm_vcpu *vcpu, u8 vector, bool soft)
 
static void kvm_clear_interrupt_queue (struct kvm_vcpu *vcpu)
 
static bool kvm_event_needs_reinjection (struct kvm_vcpu *vcpu)
 
static bool kvm_exception_is_soft (unsigned int nr)
 
static bool is_protmode (struct kvm_vcpu *vcpu)
 
static bool is_long_mode (struct kvm_vcpu *vcpu)
 
static bool is_64_bit_mode (struct kvm_vcpu *vcpu)
 
static bool is_64_bit_hypercall (struct kvm_vcpu *vcpu)
 
static bool x86_exception_has_error_code (unsigned int vector)
 
static bool mmu_is_nested (struct kvm_vcpu *vcpu)
 
static bool is_pae (struct kvm_vcpu *vcpu)
 
static bool is_pse (struct kvm_vcpu *vcpu)
 
static bool is_paging (struct kvm_vcpu *vcpu)
 
static bool is_pae_paging (struct kvm_vcpu *vcpu)
 
static u8 vcpu_virt_addr_bits (struct kvm_vcpu *vcpu)
 
static bool is_noncanonical_address (u64 la, struct kvm_vcpu *vcpu)
 
static void vcpu_cache_mmio_info (struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)
 
static bool vcpu_match_mmio_gen (struct kvm_vcpu *vcpu)
 
static void vcpu_clear_mmio_info (struct kvm_vcpu *vcpu, gva_t gva)
 
static bool vcpu_match_mmio_gva (struct kvm_vcpu *vcpu, unsigned long gva)
 
static bool vcpu_match_mmio_gpa (struct kvm_vcpu *vcpu, gpa_t gpa)
 
static unsigned long kvm_register_read (struct kvm_vcpu *vcpu, int reg)
 
static void kvm_register_write (struct kvm_vcpu *vcpu, int reg, unsigned long val)
 
static bool kvm_check_has_quirk (struct kvm *kvm, u64 quirk)
 
void kvm_inject_realmode_interrupt (struct kvm_vcpu *vcpu, int irq, int inc_eip)
 
u64 get_kvmclock_ns (struct kvm *kvm)
 
uint64_t kvm_get_wall_clock_epoch (struct kvm *kvm)
 
int kvm_read_guest_virt (struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
 
int kvm_write_guest_virt_system (struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
 
int handle_ud (struct kvm_vcpu *vcpu)
 
void kvm_deliver_exception_payload (struct kvm_vcpu *vcpu, struct kvm_queued_exception *ex)
 
void kvm_vcpu_mtrr_init (struct kvm_vcpu *vcpu)
 
u8 kvm_mtrr_get_guest_memory_type (struct kvm_vcpu *vcpu, gfn_t gfn)
 
int kvm_mtrr_set_msr (struct kvm_vcpu *vcpu, u32 msr, u64 data)
 
int kvm_mtrr_get_msr (struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
bool kvm_mtrr_check_gfn_range_consistency (struct kvm_vcpu *vcpu, gfn_t gfn, int page_num)
 
bool kvm_vector_hashing_enabled (void)
 
void kvm_fixup_and_inject_pf_error (struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 
int x86_decode_emulated_instruction (struct kvm_vcpu *vcpu, int emulation_type, void *insn, int insn_len)
 
int x86_emulate_instruction (struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len)
 
fastpath_t handle_fastpath_set_msr_irqoff (struct kvm_vcpu *vcpu)
 
static u64 kvm_get_filtered_xcr0 (void)
 
static bool kvm_mpx_supported (void)
 
static void kvm_pr_unimpl_wrmsr (struct kvm_vcpu *vcpu, u32 msr, u64 data)
 
static void kvm_pr_unimpl_rdmsr (struct kvm_vcpu *vcpu, u32 msr)
 
static u64 nsec_to_cycles (struct kvm_vcpu *vcpu, u64 nsec)
 
static bool kvm_mwait_in_guest (struct kvm *kvm)
 
static bool kvm_hlt_in_guest (struct kvm *kvm)
 
static bool kvm_pause_in_guest (struct kvm *kvm)
 
static bool kvm_cstate_in_guest (struct kvm *kvm)
 
static bool kvm_notify_vmexit_enabled (struct kvm *kvm)
 
static __always_inline void kvm_before_interrupt (struct kvm_vcpu *vcpu, enum kvm_intr_type intr)
 
static __always_inline void kvm_after_interrupt (struct kvm_vcpu *vcpu)
 
static bool kvm_handling_nmi_from_guest (struct kvm_vcpu *vcpu)
 
static bool kvm_pat_valid (u64 data)
 
static bool kvm_dr7_valid (u64 data)
 
static bool kvm_dr6_valid (u64 data)
 
static void kvm_machine_check (void)
 
void kvm_load_guest_xsave_state (struct kvm_vcpu *vcpu)
 
void kvm_load_host_xsave_state (struct kvm_vcpu *vcpu)
 
int kvm_spec_ctrl_test_value (u64 value)
 
bool __kvm_is_valid_cr4 (struct kvm_vcpu *vcpu, unsigned long cr4)
 
int kvm_handle_memory_failure (struct kvm_vcpu *vcpu, int r, struct x86_exception *e)
 
int kvm_handle_invpcid (struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 
bool kvm_msr_allowed (struct kvm_vcpu *vcpu, u32 index, u32 type)
 
int kvm_sev_es_mmio_write (struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes, void *dst)
 
int kvm_sev_es_mmio_read (struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes, void *dst)
 
int kvm_sev_es_string_io (struct kvm_vcpu *vcpu, unsigned int size, unsigned int port, void *data, unsigned int count, int in)
 

Variables

u64 host_xcr0
 
u64 host_xss
 
u64 host_arch_capabilities
 
struct kvm_caps kvm_caps
 
bool enable_pmu
 
unsigned int min_timer_period_us
 
bool enable_vmware_backdoor
 
int pi_inject_timer
 
bool report_ignored_msrs
 
bool eager_page_split
 

Macro Definition Documentation

◆ __cr4_reserved_bits

#define __cr4_reserved_bits (   __cpu_has,
  __c 
)
Value:
({ \
u64 __reserved_bits = CR4_RESERVED_BITS; \
\
if (!__cpu_has(__c, X86_FEATURE_XSAVE)) \
__reserved_bits |= X86_CR4_OSXSAVE; \
if (!__cpu_has(__c, X86_FEATURE_SMEP)) \
__reserved_bits |= X86_CR4_SMEP; \
if (!__cpu_has(__c, X86_FEATURE_SMAP)) \
__reserved_bits |= X86_CR4_SMAP; \
if (!__cpu_has(__c, X86_FEATURE_FSGSBASE)) \
__reserved_bits |= X86_CR4_FSGSBASE; \
if (!__cpu_has(__c, X86_FEATURE_PKU)) \
__reserved_bits |= X86_CR4_PKE; \
if (!__cpu_has(__c, X86_FEATURE_LA57)) \
__reserved_bits |= X86_CR4_LA57; \
if (!__cpu_has(__c, X86_FEATURE_UMIP)) \
__reserved_bits |= X86_CR4_UMIP; \
if (!__cpu_has(__c, X86_FEATURE_VMX)) \
__reserved_bits |= X86_CR4_VMXE; \
if (!__cpu_has(__c, X86_FEATURE_PCID)) \
__reserved_bits |= X86_CR4_PCIDE; \
if (!__cpu_has(__c, X86_FEATURE_LAM)) \
__reserved_bits |= X86_CR4_LAM_SUP; \
__reserved_bits; \
})

Definition at line 511 of file x86.h.
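
The macro is parameterized over the feature-query helper so the same logic can build both the host-wide and the per-guest CR4 reserved-bit masks. A minimal sketch of the two typical instantiations (exact call-site details in x86.c may differ):

	/* Host mask: a CR4 bit is reserved if the physical CPU lacks the feature. */
	cr4_reserved_bits = __cr4_reserved_bits(cpu_has, &boot_cpu_data);

	/* Per-vCPU mask: a CR4 bit is reserved if the guest CPUID lacks the feature. */
	vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(guest_cpuid_has, vcpu);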

◆ do_shl32_div32

#define do_shl32_div32 (   n,
  base 
)
Value:
({ \
u32 __quot, __rem; \
asm("divl %2" : "=a" (__quot), "=d" (__rem) \
: "rm" (base), "0" (0), "1" ((u32) n)); \
n = __quot; \
__rem; \
})

Definition at line 400 of file x86.h.
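
The asm divides the 64-bit value n * 2^32 by a 32-bit base: %edx is loaded with n and %eax with 0, so divl leaves the quotient in %eax (stored back into n) and the remainder in %edx (the macro's value). A portable sketch of the same arithmetic, assuming the callers guarantee the quotient fits in 32 bits (otherwise the real divl would fault):

	static inline u32 shl32_div32_sketch(u32 *n, u32 base)
	{
		u64 dividend = (u64)*n << 32;	/* %edx:%eax = n:0 */
		u32 rem = (u32)(dividend % base);

		*n = (u32)(dividend / base);	/* "n = __quot" */
		return rem;			/* macro evaluates to __rem */
	}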

◆ KVM_DEFAULT_PLE_GAP

#define KVM_DEFAULT_PLE_GAP   128

Definition at line 52 of file x86.h.

◆ KVM_DEFAULT_PLE_WINDOW_GROW

#define KVM_DEFAULT_PLE_WINDOW_GROW   2

Definition at line 54 of file x86.h.

◆ KVM_DEFAULT_PLE_WINDOW_SHRINK

#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0

Definition at line 55 of file x86.h.

◆ KVM_FIRST_EMULATED_VMX_MSR

#define KVM_FIRST_EMULATED_VMX_MSR   MSR_IA32_VMX_BASIC

Definition at line 49 of file x86.h.

◆ KVM_LAST_EMULATED_VMX_MSR

#define KVM_LAST_EMULATED_VMX_MSR   MSR_IA32_VMX_VMFUNC

Definition at line 50 of file x86.h.

◆ KVM_MSR_RET_FILTERED

#define KVM_MSR_RET_FILTERED   3 /* #GP due to userspace MSR filter */

Definition at line 509 of file x86.h.

◆ KVM_MSR_RET_INVALID

#define KVM_MSR_RET_INVALID   2 /* in-kernel MSR emulation #GP condition */

Definition at line 508 of file x86.h.

◆ KVM_NESTED_VMENTER_CONSISTENCY_CHECK

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK (   consistency_check)
Value:
({ \
bool failed = (consistency_check); \
if (failed) \
trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
failed; \
})

Definition at line 36 of file x86.h.
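
The macro evaluates the check, traces the stringified expression via trace_kvm_nested_vmenter_failed() when it fails, and yields the boolean result, so nested VM-enter code can test and report in one line. A hedged sketch of the usual pattern (the short CC alias and the specific check are illustrative, modelled on the style used in vmx/nested.c):

	#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)))
		return -EINVAL;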

◆ KVM_SVM_DEFAULT_PLE_WINDOW

#define KVM_SVM_DEFAULT_PLE_WINDOW   3000

Definition at line 58 of file x86.h.

◆ KVM_SVM_DEFAULT_PLE_WINDOW_MAX

#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX   USHRT_MAX

Definition at line 57 of file x86.h.

◆ KVM_VMX_DEFAULT_PLE_WINDOW

#define KVM_VMX_DEFAULT_PLE_WINDOW   4096

Definition at line 53 of file x86.h.

◆ KVM_VMX_DEFAULT_PLE_WINDOW_MAX

#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX   UINT_MAX

Definition at line 56 of file x86.h.

◆ MMIO_GVA_ANY

#define MMIO_GVA_ANY   (~(gva_t)0)

Definition at line 245 of file x86.h.

◆ MSR_IA32_CR_PAT_DEFAULT

#define MSR_IA32_CR_PAT_DEFAULT   0x0007040600070406ULL

Definition at line 90 of file x86.h.
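
Read byte by byte from PA0 upward, 0x0007040600070406ULL decodes to the architectural power-on PAT: WB, WT, UC-, UC for entries 0-3, repeated for entries 4-7.

	/* 0x0007040600070406, low byte first:
	 *   PA0 = 0x06 (WB), PA1 = 0x04 (WT), PA2 = 0x07 (UC-), PA3 = 0x00 (UC),
	 *   PA4..PA7 repeat the same pattern.
	 */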

Enumeration Type Documentation

◆ kvm_intr_type

Enumerator
KVM_HANDLING_IRQ 
KVM_HANDLING_NMI 

Definition at line 434 of file x86.h.

434  {
435  /* Values are arbitrary, but must be non-zero. */
436  KVM_HANDLING_IRQ = 1,
437  KVM_HANDLING_NMI,
438 };
@ KVM_HANDLING_IRQ
Definition: x86.h:436
@ KVM_HANDLING_NMI
Definition: x86.h:437

Function Documentation

◆ __grow_ple_window()

static unsigned int __grow_ple_window ( unsigned int  val,
unsigned int  base,
unsigned int  modifier,
unsigned int  max 
)
inlinestatic

Definition at line 60 of file x86.h.

62 {
63  u64 ret = val;
64 
65  if (modifier < 1)
66  return base;
67 
68  if (modifier < base)
69  ret *= modifier;
70  else
71  ret += modifier;
72 
73  return min(ret, (u64)max);
74 }
Here is the caller graph for this function:
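
With the VMX defaults above, each grow step multiplies the current window by KVM_DEFAULT_PLE_WINDOW_GROW (2, since the modifier is smaller than the base) and clamps the result to the configured maximum; a modifier of 0 simply returns the base. A worked step, purely illustrative:

	unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;	/* 4096 */

	ple_window = __grow_ple_window(ple_window,
				       KVM_VMX_DEFAULT_PLE_WINDOW,	/* base */
				       KVM_DEFAULT_PLE_WINDOW_GROW,	/* 2 -> multiply */
				       KVM_VMX_DEFAULT_PLE_WINDOW_MAX);
	/* ple_window is now 8192; repeated PAUSE-loop exits keep doubling it
	 * until it saturates at the maximum. */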

◆ __kvm_is_valid_cr4()

bool __kvm_is_valid_cr4 ( struct kvm_vcpu *  vcpu,
unsigned long  cr4 
)

Definition at line 1132 of file x86.c.

1133 {
1134  if (cr4 & cr4_reserved_bits)
1135  return false;
1136 
1137  if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
1138  return false;
1139 
1140  return true;
1141 }
static u64 __read_mostly cr4_reserved_bits
Definition: x86.c:116
Here is the caller graph for this function:

◆ __shrink_ple_window()

static unsigned int __shrink_ple_window ( unsigned int  val,
unsigned int  base,
unsigned int  modifier,
unsigned int  min 
)
inlinestatic

Definition at line 76 of file x86.h.

78 {
79  if (modifier < 1)
80  return base;
81 
82  if (modifier < base)
83  val /= modifier;
84  else
85  val -= modifier;
86 
87  return max(val, min);
88 }
Here is the caller graph for this function:

◆ get_kvmclock_ns()

u64 get_kvmclock_ns ( struct kvm *  kvm)

Definition at line 3105 of file x86.c.

3106 {
3107  struct kvm_clock_data data;
3108 
3109  get_kvmclock(kvm, &data);
3110  return data.clock;
3111 }
static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
Definition: x86.c:3094
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_fastpath_set_msr_irqoff()

fastpath_t handle_fastpath_set_msr_irqoff ( struct kvm_vcpu *  vcpu)

Definition at line 2185 of file x86.c.

2186 {
2187  u32 msr = kvm_rcx_read(vcpu);
2188  u64 data;
2189  fastpath_t ret = EXIT_FASTPATH_NONE;
2190 
2191  kvm_vcpu_srcu_read_lock(vcpu);
2192 
2193  switch (msr) {
2194  case APIC_BASE_MSR + (APIC_ICR >> 4):
2195  data = kvm_read_edx_eax(vcpu);
2196  if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2197  kvm_skip_emulated_instruction(vcpu);
2198  ret = EXIT_FASTPATH_EXIT_HANDLED;
2199  }
2200  break;
2201  case MSR_IA32_TSC_DEADLINE:
2202  data = kvm_read_edx_eax(vcpu);
2203  if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2204  kvm_skip_emulated_instruction(vcpu);
2205  ret = EXIT_FASTPATH_REENTER_GUEST;
2206  }
2207  break;
2208  default:
2209  break;
2210  }
2211 
2212  if (ret != EXIT_FASTPATH_NONE)
2213  trace_kvm_msr_write(msr, data);
2214 
2215  kvm_vcpu_srcu_read_unlock(vcpu);
2216 
2217  return ret;
2218 }
static u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
#define trace_kvm_msr_write(ecx, data)
Definition: trace.h:450
static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
Definition: x86.c:2176
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
Definition: x86.c:2162
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
Definition: x86.c:8916
Here is the call graph for this function:
Here is the caller graph for this function:

◆ handle_ud()

int handle_ud ( struct kvm_vcpu *  vcpu)

Definition at line 7669 of file x86.c.

7670 {
7671  static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
7672  int fep_flags = READ_ONCE(force_emulation_prefix);
7673  int emul_type = EMULTYPE_TRAP_UD;
7674  char sig[5]; /* ud2; .ascii "kvm" */
7675  struct x86_exception e;
7676  int r;
7677 
7678  r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
7679  if (r != X86EMUL_CONTINUE)
7680  return 1;
7681 
7682  if (fep_flags &&
7683  kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
7684  sig, sizeof(sig), &e) == 0 &&
7685  memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
7686  if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
7687  kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
7688  kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
7689  emul_type = EMULTYPE_TRAP_UD_FORCED;
7690  }
7691 
7692  return kvm_emulate_instruction(vcpu, emul_type);
7693 }
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
#define X86EMUL_CONTINUE
Definition: kvm_emulate.h:81
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
Definition: x86.c:13151
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
Definition: x86.c:9262
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
Definition: x86.c:13189
static int __read_mostly force_emulation_prefix
Definition: x86.c:185
static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len)
Definition: x86.c:7662
int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
Definition: x86.c:7572
#define KVM_FEP_CLEAR_RFLAGS_RF
Definition: x86.c:184
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
Definition: x86.c:13170
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_64_bit_hypercall()

static bool is_64_bit_hypercall ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 164 of file x86.h.

165 {
166  /*
167  * If running with protected guest state, the CS register is not
168  * accessible. The hypercall register values will have had to been
169  * provided in 64-bit mode, so assume the guest is in 64-bit.
170  */
171  return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
172 }
static bool is_64_bit_mode(struct kvm_vcpu *vcpu)
Definition: x86.h:152
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_64_bit_mode()

static bool is_64_bit_mode ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 152 of file x86.h.

153 {
154  int cs_db, cs_l;
155 
156  WARN_ON_ONCE(vcpu->arch.guest_state_protected);
157 
158  if (!is_long_mode(vcpu))
159  return false;
160  static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
161  return cs_l;
162 }
static bool is_long_mode(struct kvm_vcpu *vcpu)
Definition: x86.h:143
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_long_mode()

static bool is_long_mode ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 143 of file x86.h.

144 {
145 #ifdef CONFIG_X86_64
146  return !!(vcpu->arch.efer & EFER_LMA);
147 #else
148  return false;
149 #endif
150 }
Here is the caller graph for this function:

◆ is_noncanonical_address()

static bool is_noncanonical_address ( u64  la,
struct kvm_vcpu *  vcpu 
)
inlinestatic

Definition at line 213 of file x86.h.

214 {
215  return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
216 }
static u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
Definition: x86.h:208
Here is the call graph for this function:
Here is the caller graph for this function:
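
vcpu_virt_addr_bits() reports 57 when the guest has CR4.LA57 set and 48 otherwise, and an address is canonical when every bit above that width is a copy of the top implemented bit. A sketch of the underlying check, mirroring what the __is_canonical_address() helper does, with worked values for the 48-bit case:

	static inline bool is_canonical_sketch(u64 la, u8 vaddr_bits)
	{
		/* Sign-extend from vaddr_bits; canonical iff nothing changes. */
		return (u64)((s64)(la << (64 - vaddr_bits)) >> (64 - vaddr_bits)) == la;
	}

	/* vaddr_bits = 48:
	 *   0x00007fffffffffff -> canonical     (bit 47 = 0, bits 63:48 = 0)
	 *   0xffff800000000000 -> canonical     (bit 47 = 1, bits 63:48 = 1)
	 *   0x0000800000000000 -> non-canonical (bit 47 = 1, bits 63:48 = 0)
	 */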

◆ is_pae()

static bool is_pae ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 188 of file x86.h.

189 {
190  return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
191 }
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_pae_paging()

static bool is_pae_paging ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 203 of file x86.h.

204 {
205  return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
206 }
static bool is_paging(struct kvm_vcpu *vcpu)
Definition: x86.h:198
static bool is_pae(struct kvm_vcpu *vcpu)
Definition: x86.h:188
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_paging()

static bool is_paging ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 198 of file x86.h.

199 {
200  return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
201 }
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_protmode()

static bool is_protmode ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 138 of file x86.h.

139 {
140  return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
141 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ is_pse()

static bool is_pse ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 193 of file x86.h.

194 {
195  return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
196 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_after_interrupt()

static __always_inline void kvm_after_interrupt ( struct kvm_vcpu *  vcpu)
static

Definition at line 446 of file x86.h.

447 {
448  WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
449 }
Here is the caller graph for this function:

◆ kvm_before_interrupt()

static __always_inline void kvm_before_interrupt ( struct kvm_vcpu *  vcpu,
enum kvm_intr_type  intr 
)
static

Definition at line 440 of file x86.h.

442 {
443  WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
444 }
Here is the caller graph for this function:
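
kvm_before_interrupt() and kvm_after_interrupt() bracket the window in which KVM handles an IRQ or NMI that arrived while the guest was running, so code such as the PMU callbacks can ask kvm_handling_nmi_from_guest() whether the event originated in guest context. A hedged sketch of the pattern (the vendor NMI handler name is an assumption; the real call sites live in vmx.c/svm.c):

	kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
	vmx_do_nmi_irqoff();		/* assumed vendor handler, illustrative only */
	kvm_after_interrupt(vcpu);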

◆ kvm_check_has_quirk()

static bool kvm_check_has_quirk ( struct kvm *  kvm,
u64  quirk 
)
inlinestatic

Definition at line 288 of file x86.h.

289 {
290  return !(kvm->arch.disabled_quirks & quirk);
291 }
Here is the caller graph for this function:
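
Quirks are enabled by default and userspace opts out of individual ones via KVM_CAP_DISABLE_QUIRKS2, so kvm_check_has_quirk() returns true while the legacy behaviour is still active. A short usage sketch with one existing quirk bit:

	if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) {
		/* keep the legacy xAPIC MMIO-hole behaviour */
	}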

◆ kvm_check_nested_events()

int kvm_check_nested_events ( struct kvm_vcpu *  vcpu)

Definition at line 10197 of file x86.c.

10198 {
10199  if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10200  kvm_x86_ops.nested_ops->triple_fault(vcpu);
10201  return 1;
10202  }
10203 
10204  return kvm_x86_ops.nested_ops->check_events(vcpu);
10205 }
Here is the caller graph for this function:

◆ kvm_clear_exception_queue()

static void kvm_clear_exception_queue ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 107 of file x86.h.

108 {
109  vcpu->arch.exception.pending = false;
110  vcpu->arch.exception.injected = false;
111  vcpu->arch.exception_vmexit.pending = false;
112 }
Here is the caller graph for this function:

◆ kvm_clear_interrupt_queue()

static void kvm_clear_interrupt_queue ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 122 of file x86.h.

123 {
124  vcpu->arch.interrupt.injected = false;
125 }
Here is the caller graph for this function:

◆ kvm_cstate_in_guest()

static bool kvm_cstate_in_guest ( struct kvm *  kvm)
inlinestatic

Definition at line 424 of file x86.h.

425 {
426  return kvm->arch.cstate_in_guest;
427 }
Here is the caller graph for this function:

◆ kvm_deliver_exception_payload()

void kvm_deliver_exception_payload ( struct kvm_vcpu *  vcpu,
struct kvm_queued_exception *  ex 
)

Definition at line 573 of file x86.c.

575 {
576  if (!ex->has_payload)
577  return;
578 
579  switch (ex->vector) {
580  case DB_VECTOR:
581  /*
582  * "Certain debug exceptions may clear bit 0-3. The
583  * remaining contents of the DR6 register are never
584  * cleared by the processor".
585  */
586  vcpu->arch.dr6 &= ~DR_TRAP_BITS;
587  /*
588  * In order to reflect the #DB exception payload in guest
589  * dr6, three components need to be considered: active low
590  * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
591  * DR6_BS and DR6_BT)
592  * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
593  * In the target guest dr6:
594  * FIXED_1 bits should always be set.
595  * Active low bits should be cleared if 1-setting in payload.
596  * Active high bits should be set if 1-setting in payload.
597  *
598  * Note, the payload is compatible with the pending debug
599  * exceptions/exit qualification under VMX, that active_low bits
600  * are active high in payload.
601  * So they need to be flipped for DR6.
602  */
603  vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
604  vcpu->arch.dr6 |= ex->payload;
605  vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
606 
607  /*
608  * The #DB payload is defined as compatible with the 'pending
609  * debug exceptions' field under VMX, not DR6. While bit 12 is
610  * defined in the 'pending debug exceptions' field (enabled
611  * breakpoint), it is reserved and must be zero in DR6.
612  */
613  vcpu->arch.dr6 &= ~BIT(12);
614  break;
615  case PF_VECTOR:
616  vcpu->arch.cr2 = ex->payload;
617  break;
618  }
619 
620  ex->has_payload = false;
621  ex->payload = 0;
622 }
Here is the caller graph for this function:

◆ kvm_dr6_valid()

static bool kvm_dr6_valid ( u64  data)
inlinestatic

Definition at line 469 of file x86.h.

470 {
471  /* Bits [63:32] are reserved */
472  return !(data >> 32);
473 }
Here is the caller graph for this function:

◆ kvm_dr7_valid()

static bool kvm_dr7_valid ( u64  data)
inlinestatic

Definition at line 464 of file x86.h.

465 {
466  /* Bits [63:32] are reserved */
467  return !(data >> 32);
468 }
Here is the caller graph for this function:

◆ kvm_event_needs_reinjection()

static bool kvm_event_needs_reinjection ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 127 of file x86.h.

128 {
129  return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
130  vcpu->arch.nmi_injected;
131 }
Here is the caller graph for this function:

◆ kvm_exception_is_soft()

static bool kvm_exception_is_soft ( unsigned int  nr)
inlinestatic

Definition at line 133 of file x86.h.

134 {
135  return (nr == BP_VECTOR) || (nr == OF_VECTOR);
136 }
Here is the caller graph for this function:

◆ kvm_fixup_and_inject_pf_error()

void kvm_fixup_and_inject_pf_error ( struct kvm_vcpu *  vcpu,
gva_t  gva,
u16  error_code 
)

Definition at line 13558 of file x86.c.

13559 {
13560  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
13561  struct x86_exception fault;
13562  u64 access = error_code &
13563  (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
13564 
13565  if (!(error_code & PFERR_PRESENT_MASK) ||
13566  mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
13567  /*
13568  * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
13569  * tables probably do not match the TLB. Just proceed
13570  * with the error code that the processor gave.
13571  */
13572  fault.vector = PF_VECTOR;
13573  fault.error_code_valid = true;
13574  fault.error_code = error_code;
13575  fault.nested_page_fault = false;
13576  fault.address = gva;
13577  fault.async_page_fault = false;
13578  }
13579  vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
13580 }
Here is the caller graph for this function:

◆ kvm_get_filtered_xcr0()

static u64 kvm_get_filtered_xcr0 ( void  )
inlinestatic

Definition at line 341 of file x86.h.

342 {
343  u64 permitted_xcr0 = kvm_caps.supported_xcr0;
344 
345  BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);
346 
347  if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
348  permitted_xcr0 &= xstate_get_guest_group_perm();
349 
350  /*
351  * Treat XTILE_CFG as unsupported if the current process isn't
352  * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
353  * XCR0 without setting XTILE_DATA is architecturally illegal.
354  */
355  if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
356  permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
357  }
358  return permitted_xcr0;
359 }
Definition: x86.h:12
u64 supported_xcr0
Definition: x86.h:29
Here is the caller graph for this function:
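
The helper narrows kvm_caps.supported_xcr0 by the dynamic-feature permissions of the current process and then drops XTILE_CFG whenever XTILE_DATA did not survive, because setting XTILE_CFG without XTILE_DATA in XCR0 is architecturally illegal. A small sketch of the resulting invariant (mask names from asm/fpu/types.h):

	u64 xcr0 = kvm_get_filtered_xcr0();

	/* Never triggers: XTILE_CFG is filtered out unless XTILE_DATA is permitted. */
	WARN_ON((xcr0 & XFEATURE_MASK_XTILE_CFG) &&
		!(xcr0 & XFEATURE_MASK_XTILE_DATA));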

◆ kvm_get_wall_clock_epoch()

uint64_t kvm_get_wall_clock_epoch ( struct kvm *  kvm)

Definition at line 3298 of file x86.c.

3299 {
3300 #ifdef CONFIG_X86_64
3301  struct pvclock_vcpu_time_info hv_clock;
3302  struct kvm_arch *ka = &kvm->arch;
3303  unsigned long seq, local_tsc_khz;
3304  struct timespec64 ts;
3305  uint64_t host_tsc;
3306 
3307  do {
3308  seq = read_seqcount_begin(&ka->pvclock_sc);
3309 
3310  local_tsc_khz = 0;
3311  if (!ka->use_master_clock)
3312  break;
3313 
3314  /*
3315  * The TSC read and the call to get_cpu_tsc_khz() must happen
3316  * on the same CPU.
3317  */
3318  get_cpu();
3319 
3320  local_tsc_khz = get_cpu_tsc_khz();
3321 
3322  if (local_tsc_khz &&
3323  !kvm_get_walltime_and_clockread(&ts, &host_tsc))
3324  local_tsc_khz = 0; /* Fall back to old method */
3325 
3326  put_cpu();
3327 
3328  /*
3329  * These values must be snapshotted within the seqcount loop.
3330  * After that, it's just mathematics which can happen on any
3331  * CPU at any time.
3332  */
3333  hv_clock.tsc_timestamp = ka->master_cycle_now;
3334  hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3335 
3336  } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3337 
3338  /*
3339  * If the conditions were right, and obtaining the wallclock+TSC was
3340  * successful, calculate the KVM clock at the corresponding time and
3341  * subtract one from the other to get the guest's epoch in nanoseconds
3342  * since 1970-01-01.
3343  */
3344  if (local_tsc_khz) {
3345  kvm_get_time_scale(NSEC_PER_SEC, local_tsc_khz * NSEC_PER_USEC,
3346  &hv_clock.tsc_shift,
3347  &hv_clock.tsc_to_system_mul);
3348  return ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec -
3349  __pvclock_read_cycles(&hv_clock, host_tsc);
3350  }
3351 #endif
3352  return ktime_get_real_ns() - get_kvmclock_ns(kvm);
3353 }
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, s8 *pshift, u32 *pmultiplier)
Definition: x86.c:2388
u64 get_kvmclock_ns(struct kvm *kvm)
Definition: x86.c:3105
static unsigned long get_cpu_tsc_khz(void)
Definition: x86.c:3050
Here is the call graph for this function:
Here is the caller graph for this function:
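
The epoch is the wall-clock time, in nanoseconds since 1970, at which the guest's kvmclock read zero. When a consistent TSC sample is available, both clocks are evaluated from that one sample and subtracted; otherwise the function falls back to two independent readings. The relation, written out:

	/* epoch_ns = wallclock_ns(t) - kvmclock_ns(t), taken at the same instant t.
	 * Fast path:  ts/host_tsc give wallclock_ns; __pvclock_read_cycles() gives
	 *             kvmclock_ns from the same host_tsc.
	 * Fallback:   ktime_get_real_ns() - get_kvmclock_ns(kvm).
	 */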

◆ kvm_handle_invpcid()

int kvm_handle_invpcid ( struct kvm_vcpu *  vcpu,
unsigned long  type,
gva_t  gva 
)

Definition at line 13612 of file x86.c.

13613 {
13614  bool pcid_enabled;
13615  struct x86_exception e;
13616  struct {
13617  u64 pcid;
13618  u64 gla;
13619  } operand;
13620  int r;
13621 
13622  r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
13623  if (r != X86EMUL_CONTINUE)
13624  return kvm_handle_memory_failure(vcpu, r, &e);
13625 
13626  if (operand.pcid >> 12 != 0) {
13627  kvm_inject_gp(vcpu, 0);
13628  return 1;
13629  }
13630 
13631  pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
13632 
13633  switch (type) {
13634  case INVPCID_TYPE_INDIV_ADDR:
13635  /*
13636  * LAM doesn't apply to addresses that are inputs to TLB
13637  * invalidation.
13638  */
13639  if ((!pcid_enabled && (operand.pcid != 0)) ||
13640  is_noncanonical_address(operand.gla, vcpu)) {
13641  kvm_inject_gp(vcpu, 0);
13642  return 1;
13643  }
13644  kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
13645  return kvm_skip_emulated_instruction(vcpu);
13646 
13647  case INVPCID_TYPE_SINGLE_CTXT:
13648  if (!pcid_enabled && (operand.pcid != 0)) {
13649  kvm_inject_gp(vcpu, 0);
13650  return 1;
13651  }
13652 
13653  kvm_invalidate_pcid(vcpu, operand.pcid);
13654  return kvm_skip_emulated_instruction(vcpu);
13655 
13656  case INVPCID_TYPE_ALL_NON_GLOBAL:
13657  /*
13658  * Currently, KVM doesn't mark global entries in the shadow
13659  * page tables, so a non-global flush just degenerates to a
13660  * global flush. If needed, we could optimize this later by
13661  * keeping track of global entries in shadow page tables.
13662  */
13663 
13664  fallthrough;
13665  case INVPCID_TYPE_ALL_INCL_GLOBAL:
13666  kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13667  return kvm_skip_emulated_instruction(vcpu);
13668 
13669  default:
13670  kvm_inject_gp(vcpu, 0);
13671  return 1;
13672  }
13673 }
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
Definition: mmu.c:5986
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e)
Definition: x86.c:13588
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
Definition: x86.c:1223
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
Definition: x86.h:213
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_handle_memory_failure()

int kvm_handle_memory_failure ( struct kvm_vcpu *  vcpu,
int  r,
struct x86_exception *  e 
)

Definition at line 13588 of file x86.c.

13590 {
13591  if (r == X86EMUL_PROPAGATE_FAULT) {
13592  if (KVM_BUG_ON(!e, vcpu->kvm))
13593  return -EIO;
13594 
13595  kvm_inject_emulated_page_fault(vcpu, e);
13596  return 1;
13597  }
13598 
13599  /*
13600  * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
13601  * while handling a VMX instruction KVM could've handled the request
13602  * correctly by exiting to userspace and performing I/O but there
13603  * doesn't seem to be a real use-case behind such requests, just return
13604  * KVM_EXIT_INTERNAL_ERROR for now.
13605  */
13606  kvm_prepare_emulation_failure_exit(vcpu);
13607 
13608  return 0;
13609 }
#define X86EMUL_PROPAGATE_FAULT
Definition: kvm_emulate.h:85
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
Definition: x86.c:796
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
Definition: x86.c:8727
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_handling_nmi_from_guest()

static bool kvm_handling_nmi_from_guest ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 451 of file x86.h.

452 {
453  return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
454 }
Here is the caller graph for this function:

◆ kvm_hlt_in_guest()

static bool kvm_hlt_in_guest ( struct kvm *  kvm)
inlinestatic

Definition at line 414 of file x86.h.

415 {
416  return kvm->arch.hlt_in_guest;
417 }
Here is the caller graph for this function:

◆ kvm_inject_realmode_interrupt()

void kvm_inject_realmode_interrupt ( struct kvm_vcpu *  vcpu,
int  irq,
int  inc_eip 
)

Definition at line 8639 of file x86.c.

8640 {
8641  struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8642  int ret;
8643 
8644  init_emulate_ctxt(vcpu);
8645 
8646  ctxt->op_bytes = 2;
8647  ctxt->ad_bytes = 2;
8648  ctxt->_eip = ctxt->eip + inc_eip;
8649  ret = emulate_int_real(ctxt, irq);
8650 
8651  if (ret != X86EMUL_CONTINUE) {
8652  kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8653  } else {
8654  ctxt->eip = ctxt->_eip;
8655  kvm_rip_write(vcpu, ctxt->eip);
8656  kvm_set_rflags(vcpu, ctxt->eflags);
8657  }
8658 }
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Definition: emulate.c:2069
unsigned long eflags
Definition: kvm_emulate.h:312
unsigned long _eip
Definition: kvm_emulate.h:362
unsigned long eip
Definition: kvm_emulate.h:313
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
Definition: x86.c:8613
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_is_exception_pending()

static bool kvm_is_exception_pending ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 100 of file x86.h.

101 {
102  return vcpu->arch.exception.pending ||
103  vcpu->arch.exception_vmexit.pending ||
104  kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
105 }
Here is the caller graph for this function:

◆ kvm_load_guest_xsave_state()

void kvm_load_guest_xsave_state ( struct kvm_vcpu *  vcpu)

Definition at line 1018 of file x86.c.

1019 {
1020  if (vcpu->arch.guest_state_protected)
1021  return;
1022 
1023  if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1024 
1025  if (vcpu->arch.xcr0 != host_xcr0)
1026  xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
1027 
1028  if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
1029  vcpu->arch.ia32_xss != host_xss)
1030  wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
1031  }
1032 
1033  if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1034  vcpu->arch.pkru != vcpu->arch.host_pkru &&
1035  ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1036  kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
1037  write_pkru(vcpu->arch.pkru);
1038 }
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:278
u64 __read_mostly host_xss
Definition: x86.c:238
u64 __read_mostly host_xcr0
Definition: x86.c:317
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_load_host_xsave_state()

void kvm_load_host_xsave_state ( struct kvm_vcpu *  vcpu)

Definition at line 1041 of file x86.c.

1042 {
1043  if (vcpu->arch.guest_state_protected)
1044  return;
1045 
1046  if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1047  ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1048  kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
1049  vcpu->arch.pkru = rdpkru();
1050  if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1051  write_pkru(vcpu->arch.host_pkru);
1052  }
1053 
1054  if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1055 
1056  if (vcpu->arch.xcr0 != host_xcr0)
1057  xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
1058 
1059  if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
1060  vcpu->arch.ia32_xss != host_xss)
1061  wrmsrl(MSR_IA32_XSS, host_xss);
1062  }
1063 
1064 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_machine_check()

static void kvm_machine_check ( void  )
inlinestatic

Definition at line 482 of file x86.h.

483 {
484 #if defined(CONFIG_X86_MCE)
485  struct pt_regs regs = {
486  .cs = 3, /* Fake ring 3 no matter what the guest ran on */
487  .flags = X86_EFLAGS_IF,
488  };
489 
490  do_machine_check(&regs);
491 #endif
492 }
Here is the caller graph for this function:

◆ kvm_mpx_supported()

static bool kvm_mpx_supported ( void  )
inlinestatic

Definition at line 361 of file x86.h.

362 {
363  return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
364  == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
365 }
Here is the caller graph for this function:

◆ kvm_msr_allowed()

bool kvm_msr_allowed ( struct kvm_vcpu *  vcpu,
u32  index,
u32  type 
)

Definition at line 1796 of file x86.c.

1797 {
1798  struct kvm_x86_msr_filter *msr_filter;
1799  struct msr_bitmap_range *ranges;
1800  struct kvm *kvm = vcpu->kvm;
1801  bool allowed;
1802  int idx;
1803  u32 i;
1804 
1805  /* x2APIC MSRs do not support filtering. */
1806  if (index >= 0x800 && index <= 0x8ff)
1807  return true;
1808 
1809  idx = srcu_read_lock(&kvm->srcu);
1810 
1811  msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1812  if (!msr_filter) {
1813  allowed = true;
1814  goto out;
1815  }
1816 
1817  allowed = msr_filter->default_allow;
1818  ranges = msr_filter->ranges;
1819 
1820  for (i = 0; i < msr_filter->count; i++) {
1821  u32 start = ranges[i].base;
1822  u32 end = start + ranges[i].nmsrs;
1823  u32 flags = ranges[i].flags;
1824  unsigned long *bitmap = ranges[i].bitmap;
1825 
1826  if ((index >= start) && (index < end) && (flags & type)) {
1827  allowed = test_bit(index - start, bitmap);
1828  break;
1829  }
1830  }
1831 
1832 out:
1833  srcu_read_unlock(&kvm->srcu, idx);
1834 
1835  return allowed;
1836 }
uint32_t flags
Definition: xen.c:1
Here is the caller graph for this function:

◆ kvm_mtrr_check_gfn_range_consistency()

bool kvm_mtrr_check_gfn_range_consistency ( struct kvm_vcpu *  vcpu,
gfn_t  gfn,
int  page_num 
)

Definition at line 690 of file mtrr.c.

692 {
693  struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
694  struct mtrr_iter iter;
695  u64 start, end;
696  int type = -1;
697 
698  start = gfn_to_gpa(gfn);
699  end = gfn_to_gpa(gfn + page_num);
700  mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
701  if (type == -1) {
702  type = iter.mem_type;
703  continue;
704  }
705 
706  if (type != iter.mem_type)
707  return false;
708  }
709 
710  if (iter.mtrr_disabled)
711  return true;
712 
713  if (!iter.partial_map)
714  return true;
715 
716  if (type == -1)
717  return true;
718 
719  return type == mtrr_default_type(mtrr_state);
720 }
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_)
Definition: mtrr.c:610
static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
Definition: mtrr.c:114
struct kvm_mtrr * mtrr_state
Definition: mtrr.c:441
u64 start
Definition: mtrr.c:442
u64 end
Definition: mtrr.c:443
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_mtrr_get_guest_memory_type()

u8 kvm_mtrr_get_guest_memory_type ( struct kvm_vcpu *  vcpu,
gfn_t  gfn 
)

Definition at line 614 of file mtrr.c.

615 {
616  struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
617  struct mtrr_iter iter;
618  u64 start, end;
619  int type = -1;
620  const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
621  | (1 << MTRR_TYPE_WRTHROUGH);
622 
623  start = gfn_to_gpa(gfn);
624  end = start + PAGE_SIZE;
625 
626  mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
627  int curr_type = iter.mem_type;
628 
629  /*
630  * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
631  * Precedences.
632  */
633 
634  if (type == -1) {
635  type = curr_type;
636  continue;
637  }
638 
639  /*
640  * If two or more variable memory ranges match and the
641  * memory types are identical, then that memory type is
642  * used.
643  */
644  if (type == curr_type)
645  continue;
646 
647  /*
648  * If two or more variable memory ranges match and one of
649  * the memory types is UC, the UC memory type used.
650  */
651  if (curr_type == MTRR_TYPE_UNCACHABLE)
652  return MTRR_TYPE_UNCACHABLE;
653 
654  /*
655  * If two or more variable memory ranges match and the
656  * memory types are WT and WB, the WT memory type is used.
657  */
658  if (((1 << type) & wt_wb_mask) &&
659  ((1 << curr_type) & wt_wb_mask)) {
660  type = MTRR_TYPE_WRTHROUGH;
661  continue;
662  }
663 
664  /*
665  * For overlaps not defined by the above rules, processor
666  * behavior is undefined.
667  */
668 
669  /* We use WB for this undefined behavior. :( */
670  return MTRR_TYPE_WRBACK;
671  }
672 
673  if (iter.mtrr_disabled)
674  return mtrr_disabled_type(vcpu);
675 
676  /* not contained in any MTRRs. */
677  if (type == -1)
678  return mtrr_default_type(mtrr_state);
679 
680  /*
681  * We just check one page, partially covered by MTRRs is
682  * impossible.
683  */
684  WARN_ON(iter.partial_map);
685 
686  return type;
687 }
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
Definition: mtrr.c:119
Here is the call graph for this function:
Here is the caller graph for this function:
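
The loop resolves overlapping variable MTRRs using the SDM precedence rules quoted in the comments: identical types collapse to that type, UC always wins, and a WT/WB overlap yields WT; any other combination is architecturally undefined and KVM falls back to WB. Worked examples for a gfn covered by two variable ranges:

	/*   WB + WB -> WB   (identical types)
	 *   WB + UC -> UC   (UC always wins)
	 *   WB + WT -> WT   (WT/WB overlap resolves to WT)
	 *   WC + WP -> WB   (undefined overlap; KVM settles on WB)
	 */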

◆ kvm_mtrr_get_msr()

int kvm_mtrr_get_msr ( struct kvm_vcpu *  vcpu,
u32  msr,
u64 *  pdata 
)

Definition at line 397 of file mtrr.c.

398 {
399  int index;
400 
401  /* MSR_MTRRcap is a readonly MSR. */
402  if (msr == MSR_MTRRcap) {
403  /*
404  * SMRR = 0
405  * WC = 1
406  * FIX = 1
407  * VCNT = KVM_NR_VAR_MTRR
408  */
409  *pdata = 0x500 | KVM_NR_VAR_MTRR;
410  return 0;
411  }
412 
413  if (!msr_mtrr_valid(msr))
414  return 1;
415 
416  index = fixed_msr_to_range_index(msr);
417  if (index >= 0) {
418  *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
419  } else if (msr == MSR_MTRRdefType) {
420  *pdata = vcpu->arch.mtrr_state.deftype;
421  } else {
422  /* Variable MTRRs */
423  if (is_mtrr_base_msr(msr))
424  *pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
425  else
426  *pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;
427 
428  *pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
429  }
430 
431  return 0;
432 }
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
Definition: cpuid.c:409
static bool is_mtrr_base_msr(unsigned int msr)
Definition: mtrr.c:28
static int fixed_msr_to_range_index(u32 msr)
Definition: mtrr.c:261
static struct kvm_mtrr_range * var_mtrr_msr_to_range(struct kvm_vcpu *vcpu, unsigned int msr)
Definition: mtrr.c:34
static bool msr_mtrr_valid(unsigned msr)
Definition: mtrr.c:42
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_mtrr_set_msr()

int kvm_mtrr_set_msr ( struct kvm_vcpu *  vcpu,
u32  msr,
u64  data 
)

Definition at line 378 of file mtrr.c.

379 {
380  int index;
381 
382  if (!kvm_mtrr_valid(vcpu, msr, data))
383  return 1;
384 
385  index = fixed_msr_to_range_index(msr);
386  if (index >= 0)
387  *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
388  else if (msr == MSR_MTRRdefType)
389  vcpu->arch.mtrr_state.deftype = data;
390  else
391  set_var_mtrr_msr(vcpu, msr, data);
392 
393  update_mtrr(vcpu, msr);
394  return 0;
395 }
static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
Definition: mtrr.c:68
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
Definition: mtrr.c:318
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
Definition: mtrr.c:349
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_mwait_in_guest()

static bool kvm_mwait_in_guest ( struct kvm *  kvm)
inlinestatic

Definition at line 409 of file x86.h.

410 {
411  return kvm->arch.mwait_in_guest;
412 }
Here is the caller graph for this function:

◆ kvm_notify_vmexit_enabled()

static bool kvm_notify_vmexit_enabled ( struct kvm *  kvm)
inlinestatic

Definition at line 429 of file x86.h.

430 {
431  return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
432 }
Here is the caller graph for this function:

◆ kvm_pat_valid()

static bool kvm_pat_valid ( u64  data)
inlinestatic

Definition at line 456 of file x86.h.

457 {
458  if (data & 0xF8F8F8F8F8F8F8F8ull)
459  return false;
460  /* 0, 1, 4, 5, 6, 7 are valid values. */
461  return (data | ((data & 0x0202020202020202ull) << 1)) == data;
462 }
Here is the caller graph for this function:
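
Every PAT byte must be one of the legal encodings 0 (UC), 1 (WC), 4 (WT), 5 (WP), 6 (WB) or 7 (UC-). The first mask rejects any byte above 7; the OR trick then rejects 2 and 3, the only remaining values with bit 1 set but bit 2 clear. A worked per-byte form of the second test:

	/* For each byte b already known to be <= 7:
	 *   valid  <=>  (b | ((b & 2) << 1)) == b
	 *   b = 0, 1, 4, 5: bit 1 clear, nothing is OR'd in        -> valid
	 *   b = 6, 7:       bit 1 set, but bit 2 is already set    -> valid
	 *   b = 2, 3:       bit 1 set, OR'ing in bit 2 changes b   -> invalid
	 */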

◆ kvm_pause_in_guest()

static bool kvm_pause_in_guest ( struct kvm *  kvm)
inlinestatic

Definition at line 419 of file x86.h.

420 {
421  return kvm->arch.pause_in_guest;
422 }
Here is the caller graph for this function:

◆ kvm_pr_unimpl_rdmsr()

static void kvm_pr_unimpl_rdmsr ( struct kvm_vcpu *  vcpu,
u32  msr 
)
inlinestatic

Definition at line 383 of file x86.h.

384 {
385  if (report_ignored_msrs)
386  vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
387 }
bool report_ignored_msrs
Definition: x86.c:150
Here is the caller graph for this function:

◆ kvm_pr_unimpl_wrmsr()

static void kvm_pr_unimpl_wrmsr ( struct kvm_vcpu *  vcpu,
u32  msr,
u64  data 
)
inlinestatic

Definition at line 377 of file x86.h.

378 {
379  if (report_ignored_msrs)
380  vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
381 }
Here is the caller graph for this function:

◆ kvm_queue_interrupt()

static void kvm_queue_interrupt ( struct kvm_vcpu *  vcpu,
u8  vector,
bool  soft 
)
inlinestatic

Definition at line 114 of file x86.h.

116 {
117  vcpu->arch.interrupt.injected = true;
118  vcpu->arch.interrupt.soft = soft;
119  vcpu->arch.interrupt.nr = vector;
120 }
Here is the caller graph for this function:

◆ kvm_read_guest_virt()

int kvm_read_guest_virt ( struct kvm_vcpu *  vcpu,
gva_t  addr,
void *  val,
unsigned int  bytes,
struct x86_exception *  exception 
)

Definition at line 7572 of file x86.c.

7575 {
7576  u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7577 
7578  /*
7579  * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
7580  * is returned, but our callers are not ready for that and they blindly
7581  * call kvm_inject_page_fault. Ensure that they at least do not leak
7582  * uninitialized kernel stack memory into cr2 and error code.
7583  */
7584  memset(exception, 0, sizeof(*exception));
7585  return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
7586  exception);
7587 }
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u64 access, struct x86_exception *exception)
Definition: x86.c:7513
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_register_read()

static unsigned long kvm_register_read ( struct kvm_vcpu *  vcpu,
int  reg 
)
inlinestatic

Definition at line 273 of file x86.h.

274 {
275  unsigned long val = kvm_register_read_raw(vcpu, reg);
276 
277  return is_64_bit_mode(vcpu) ? val : (u32)val;
278 }
static unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_register_write()

static void kvm_register_write ( struct kvm_vcpu *  vcpu,
int  reg,
unsigned long  val 
)
inlinestatic

Definition at line 280 of file x86.h.

282 {
283  if (!is_64_bit_mode(vcpu))
284  val = (u32)val;
285  return kvm_register_write_raw(vcpu, reg, val);
286 }
static void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg, unsigned long val)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_service_local_tlb_flush_requests()

void kvm_service_local_tlb_flush_requests ( struct kvm_vcpu *  vcpu)

Definition at line 3617 of file x86.c.

3618 {
3619  if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3620  kvm_vcpu_flush_tlb_current(vcpu);
3621 
3622  if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
3623  kvm_vcpu_flush_tlb_guest(vcpu);
3624 }
static void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
Definition: x86.c:3605
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
Definition: x86.c:3580
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_sev_es_mmio_read()

int kvm_sev_es_mmio_read ( struct kvm_vcpu *  vcpu,
gpa_t  src,
unsigned int  bytes,
void *  dst 
)

Definition at line 13761 of file x86.c.

13763 {
13764  int handled;
13765  struct kvm_mmio_fragment *frag;
13766 
13767  if (!data)
13768  return -EINVAL;
13769 
13770  handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13771  if (handled == bytes)
13772  return 1;
13773 
13774  bytes -= handled;
13775  gpa += handled;
13776  data += handled;
13777 
13778  /*TODO: Check if need to increment number of frags */
13779  frag = vcpu->mmio_fragments;
13780  vcpu->mmio_nr_fragments = 1;
13781  frag->len = bytes;
13782  frag->gpa = gpa;
13783  frag->data = data;
13784 
13785  vcpu->mmio_needed = 1;
13786  vcpu->mmio_cur_fragment = 0;
13787 
13788  vcpu->run->mmio.phys_addr = gpa;
13789  vcpu->run->mmio.len = min(8u, frag->len);
13790  vcpu->run->mmio.is_write = 0;
13791  vcpu->run->exit_reason = KVM_EXIT_MMIO;
13792 
13793  vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13794 
13795  return 0;
13796 }
int(* read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
Definition: x86.c:7758
static const struct read_write_emulator_ops read_emultor
Definition: x86.c:7811
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
Definition: x86.c:13676
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_sev_es_mmio_write()

int kvm_sev_es_mmio_write ( struct kvm_vcpu *  vcpu,
gpa_t  src,
unsigned int  bytes,
void *  dst 
)

Definition at line 13722 of file x86.c.

13724 {
13725  int handled;
13726  struct kvm_mmio_fragment *frag;
13727 
13728  if (!data)
13729  return -EINVAL;
13730 
13731  handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13732  if (handled == bytes)
13733  return 1;
13734 
13735  bytes -= handled;
13736  gpa += handled;
13737  data += handled;
13738 
13739  /*TODO: Check if need to increment number of frags */
13740  frag = vcpu->mmio_fragments;
13741  vcpu->mmio_nr_fragments = 1;
13742  frag->len = bytes;
13743  frag->gpa = gpa;
13744  frag->data = data;
13745 
13746  vcpu->mmio_needed = 1;
13747  vcpu->mmio_cur_fragment = 0;
13748 
13749  vcpu->run->mmio.phys_addr = gpa;
13750  vcpu->run->mmio.len = min(8u, frag->len);
13751  vcpu->run->mmio.is_write = 1;
13752  memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
13753  vcpu->run->exit_reason = KVM_EXIT_MMIO;
13754 
13755  vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13756 
13757  return 0;
13758 }
static const struct read_write_emulator_ops write_emultor
Definition: x86.c:7818
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_sev_es_string_io()

int kvm_sev_es_string_io ( struct kvm_vcpu *  vcpu,
unsigned int  size,
unsigned int  port,
void *  data,
unsigned int  count,
int  in 
)

Definition at line 13876 of file x86.c.

13879 {
13880  vcpu->arch.sev_pio_data = data;
13881  vcpu->arch.sev_pio_count = count;
13882  return in ? kvm_sev_es_ins(vcpu, size, port)
13883  : kvm_sev_es_outs(vcpu, size, port);
13884 }
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, unsigned int port)
Definition: x86.c:13819
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, unsigned int port)
Definition: x86.c:13857
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_spec_ctrl_test_value()

int kvm_spec_ctrl_test_value ( u64  value)

Definition at line 13532 of file x86.c.

13533 {
13534  /*
13535  * test that setting IA32_SPEC_CTRL to given value
13536  * is allowed by the host processor
13537  */
13538 
13539  u64 saved_value;
13540  unsigned long flags;
13541  int ret = 0;
13542 
13543  local_irq_save(flags);
13544 
13545  if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
13546  ret = 1;
13547  else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
13548  ret = 1;
13549  else
13550  wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
13551 
13552  local_irq_restore(flags);
13553 
13554  return ret;
13555 }
Here is the caller graph for this function:

◆ kvm_spurious_fault()

void kvm_spurious_fault ( void  )

Definition at line 513 of file x86.c.

514 {
515  /* Fault while not rebooting. We want the trace. */
516  BUG_ON(!kvm_rebooting);
517 }
Here is the caller graph for this function:

◆ kvm_vcpu_has_run()

static bool kvm_vcpu_has_run ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 95 of file x86.h.

96 {
97  return vcpu->arch.last_vmentry_cpu != -1;
98 }
Here is the caller graph for this function:

◆ kvm_vcpu_mtrr_init()

void kvm_vcpu_mtrr_init ( struct kvm_vcpu *  vcpu)

Definition at line 434 of file mtrr.c.

435 {
436  INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
437 }
Here is the caller graph for this function:

◆ kvm_vector_hashing_enabled()

bool kvm_vector_hashing_enabled ( void  )

Definition at line 13520 of file x86.c.

13521 {
13522  return vector_hashing;
13523 }
static bool __read_mostly vector_hashing
Definition: x86.c:173
Here is the caller graph for this function:

◆ kvm_write_guest_virt_system()

int kvm_write_guest_virt_system ( struct kvm_vcpu *  vcpu,
gva_t  addr,
void *  val,
unsigned int  bytes,
struct x86_exception *  exception 
)

Definition at line 7651 of file x86.c.

7653 {
7654  /* kvm_write_guest_virt_system can pull in tons of pages. */
7655  vcpu->arch.l1tf_flush_l1d = true;
7656 
7657  return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
7658  PFERR_WRITE_MASK, exception);
7659 }
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u64 access, struct x86_exception *exception)
Definition: x86.c:7605
Here is the call graph for this function:
Here is the caller graph for this function:

◆ mmu_is_nested()

static bool mmu_is_nested ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 183 of file x86.h.

184 {
185  return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
186 }
Here is the caller graph for this function:

◆ nsec_to_cycles()

static u64 nsec_to_cycles ( struct kvm_vcpu *  vcpu,
u64  nsec 
)
inlinestatic

Definition at line 389 of file x86.h.

390 {
391  return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
392  vcpu->arch.virtual_tsc_shift);
393 }
Here is the caller graph for this function:
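
The conversion uses the vCPU's pvclock scaling pair (virtual_tsc_mult, virtual_tsc_shift), which was derived from the guest's virtual TSC frequency; pvclock_scale_delta() shifts the delta and applies the 32.32 fixed-point multiplier. Roughly, for a non-negative shift:

	/* cycles ~= ((nsec << shift) * mult) >> 32
	 * (for a negative shift the delta is shifted right by -shift instead)
	 */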

◆ vcpu_cache_mmio_info()

static void vcpu_cache_mmio_info ( struct kvm_vcpu *  vcpu,
gva_t  gva,
gfn_t  gfn,
unsigned  access 
)
inlinestatic

Definition at line 218 of file x86.h.

220 {
221  u64 gen = kvm_memslots(vcpu->kvm)->generation;
222 
223  if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
224  return;
225 
226  /*
227  * If this is a shadow nested page table, the "GVA" is
228  * actually a nGPA.
229  */
230  vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
231  vcpu->arch.mmio_access = access;
232  vcpu->arch.mmio_gfn = gfn;
233  vcpu->arch.mmio_gen = gen;
234 }
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
Definition: x86.h:183
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vcpu_clear_mmio_info()

static void vcpu_clear_mmio_info ( struct kvm_vcpu *  vcpu,
gva_t  gva 
)
inlinestatic

Definition at line 247 of file x86.h.

248 {
249  if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
250  return;
251 
252  vcpu->arch.mmio_gva = 0;
253 }
#define MMIO_GVA_ANY
Definition: x86.h:245
Here is the caller graph for this function:

◆ vcpu_match_mmio_gen()

static bool vcpu_match_mmio_gen ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 236 of file x86.h.

237 {
238  return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
239 }
Here is the caller graph for this function:

◆ vcpu_match_mmio_gpa()

static bool vcpu_match_mmio_gpa ( struct kvm_vcpu *  vcpu,
gpa_t  gpa 
)
inlinestatic

Definition at line 264 of file x86.h.

265 {
266  if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
267  vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
268  return true;
269 
270  return false;
271 }
static bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
Definition: x86.h:236
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vcpu_match_mmio_gva()

static bool vcpu_match_mmio_gva ( struct kvm_vcpu *  vcpu,
unsigned long  gva 
)
inlinestatic

Definition at line 255 of file x86.h.

256 {
257  if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
258  vcpu->arch.mmio_gva == (gva & PAGE_MASK))
259  return true;
260 
261  return false;
262 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vcpu_virt_addr_bits()

static u8 vcpu_virt_addr_bits ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 208 of file x86.h.

209 {
210  return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
211 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ x86_decode_emulated_instruction()

int x86_decode_emulated_instruction ( struct kvm_vcpu *  vcpu,
int  emulation_type,
void *  insn,
int  insn_len 
)

Definition at line 9057 of file x86.c.

9059 {
9060  struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9061  int r;
9062 
9063  init_emulate_ctxt(vcpu);
9064 
9065  r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
9066 
9067  trace_kvm_emulate_insn_start(vcpu);
9068  ++vcpu->stat.insn_emulation;
9069 
9070  return r;
9071 }
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
Definition: emulate.c:4763
#define trace_kvm_emulate_insn_start(vcpu)
Definition: trace.h:848
Here is the call graph for this function:
Here is the caller graph for this function:

◆ x86_emulate_instruction()

int x86_emulate_instruction ( struct kvm_vcpu *  vcpu,
gpa_t  cr2_or_gpa,
int  emulation_type,
void *  insn,
int  insn_len 
)

Definition at line 9074 of file x86.c.

9076 {
9077  int r;
9078  struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
9079  bool writeback = true;
9080 
9081  r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
9082  if (r != X86EMUL_CONTINUE) {
9083  if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
9084  return 1;
9085 
9086  WARN_ON_ONCE(r != X86EMUL_UNHANDLEABLE);
9087  return handle_emulation_failure(vcpu, emulation_type);
9088  }
9089 
9090  vcpu->arch.l1tf_flush_l1d = true;
9091 
9092  if (!(emulation_type & EMULTYPE_NO_DECODE)) {
9093  kvm_clear_exception_queue(vcpu);
9094 
9095  /*
9096  * Return immediately if RIP hits a code breakpoint, such #DBs
9097  * are fault-like and are higher priority than any faults on
9098  * the code fetch itself.
9099  */
9100  if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
9101  return r;
9102 
9103  r = x86_decode_emulated_instruction(vcpu, emulation_type,
9104  insn, insn_len);
9105  if (r != EMULATION_OK) {
9106  if ((emulation_type & EMULTYPE_TRAP_UD) ||
9107  (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
9108  kvm_queue_exception(vcpu, UD_VECTOR);
9109  return 1;
9110  }
9111  if (reexecute_instruction(vcpu, cr2_or_gpa,
9112  emulation_type))
9113  return 1;
9114 
9115  if (ctxt->have_exception &&
9116  !(emulation_type & EMULTYPE_SKIP)) {
9117  /*
9118  * #UD should result in just EMULATION_FAILED, and trap-like
9119  * exception should not be encountered during decode.
9120  */
9121  WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
9122  exception_type(ctxt->exception.vector) == EXCPT_TRAP);
9123  inject_emulated_exception(vcpu);
9124  return 1;
9125  }
9126  return handle_emulation_failure(vcpu, emulation_type);
9127  }
9128  }
9129 
9130  if ((emulation_type & EMULTYPE_VMWARE_GP) &&
9131  !is_vmware_backdoor_opcode(ctxt)) {
9132  kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
9133  return 1;
9134  }
9135 
9136  /*
9137  * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
9138  * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
9139  * The caller is responsible for updating interruptibility state and
9140  * injecting single-step #DBs.
9141  */
9142  if (emulation_type & EMULTYPE_SKIP) {
9143  if (ctxt->mode != X86EMUL_MODE_PROT64)
9144  ctxt->eip = (u32)ctxt->_eip;
9145  else
9146  ctxt->eip = ctxt->_eip;
9147 
9148  if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
9149  r = 1;
9150  goto writeback;
9151  }
9152 
9153  kvm_rip_write(vcpu, ctxt->eip);
9154  if (ctxt->eflags & X86_EFLAGS_RF)
9155  kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
9156  return 1;
9157  }
9158 
9159  if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
9160  return 1;
9161 
9162  /* this is needed for vmware backdoor interface to work since it
9163  changes registers values during IO operation */
9164  if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
9165  vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
9166  emulator_invalidate_register_cache(ctxt);
9167  }
9168 
9169 restart:
9170  if (emulation_type & EMULTYPE_PF) {
9171  /* Save the faulting GPA (cr2) in the address field */
9172  ctxt->exception.address = cr2_or_gpa;
9173 
9174  /* With shadow page tables, cr2 contains a GVA or nGPA. */
9175  if (vcpu->arch.mmu->root_role.direct) {
9176  ctxt->gpa_available = true;
9177  ctxt->gpa_val = cr2_or_gpa;
9178  }
9179  } else {
9180  /* Sanitize the address out of an abundance of paranoia. */
9181  ctxt->exception.address = 0;
9182  }
9183 
9184  r = x86_emulate_insn(ctxt);
9185 
9186  if (r == EMULATION_INTERCEPTED)
9187  return 1;
9188 
9189  if (r == EMULATION_FAILED) {
9190  if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
9191  return 1;
9192 
9193  return handle_emulation_failure(vcpu, emulation_type);
9194  }
9195 
9196  if (ctxt->have_exception) {
9197  WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
9198  vcpu->mmio_needed = false;
9199  r = 1;
9200  inject_emulated_exception(vcpu);
9201  } else if (vcpu->arch.pio.count) {
9202  if (!vcpu->arch.pio.in) {
9203  /* FIXME: return into emulator if single-stepping. */
9204  vcpu->arch.pio.count = 0;
9205  } else {
9206  writeback = false;
9207  vcpu->arch.complete_userspace_io = complete_emulated_pio;
9208  }
9209  r = 0;
9210  } else if (vcpu->mmio_needed) {
9211  ++vcpu->stat.mmio_exits;
9212 
9213  if (!vcpu->mmio_is_write)
9214  writeback = false;
9215  r = 0;
9216  vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9217  } else if (vcpu->arch.complete_userspace_io) {
9218  writeback = false;
9219  r = 0;
9220  } else if (r == EMULATION_RESTART)
9221  goto restart;
9222  else
9223  r = 1;
9224 
9225 writeback:
9226  if (writeback) {
9227  unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
9228  toggle_interruptibility(vcpu, ctxt->interruptibility);
9229  vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9230 
9231  /*
9232  * Note, EXCPT_DB is assumed to be fault-like as the emulator
9233  * only supports code breakpoints and general detect #DB, both
9234  * of which are fault-like.
9235  */
9236  if (!ctxt->have_exception ||
9237  exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9238  kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
9239  if (ctxt->is_branch)
9240  kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
9241  kvm_rip_write(vcpu, ctxt->eip);
9242  if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
9243  r = kvm_vcpu_do_singlestep(vcpu);
9244  static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
9245  __kvm_set_rflags(vcpu, ctxt->eflags);
9246  }
9247 
9248  /*
9249  * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
9250  * do nothing, and it will be requested again as soon as
9251  * the shadow expires. But we still need to check here,
9252  * because POPF has no interrupt shadow.
9253  */
9254  if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
9255  kvm_make_request(KVM_REQ_EVENT, vcpu);
9256  } else
9257  vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
9258 
9259  return r;
9260 }
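
x86_emulate_instruction() follows KVM's usual run-loop convention: returning 1 means emulation completed (or an exception was injected) and the vCPU may re-enter the guest, while returning 0 means userspace must service an MMIO or PIO exit first, with vcpu->arch.complete_userspace_io set so the deferred write-back runs on the next KVM_RUN. The stand-alone sketch below models only that contract; none of its names are kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the "finish in userspace, then complete" pattern used above. */
struct toy_vcpu {
	bool mmio_pending;                 /* emulation stopped at an MMIO read */
	long mmio_value;                   /* value "userspace" hands back      */
	int (*complete_userspace_io)(struct toy_vcpu *vcpu);
};

static int complete_mmio_read(struct toy_vcpu *vcpu)
{
	printf("write back MMIO value 0x%lx, resume guest\n", vcpu->mmio_value);
	vcpu->mmio_pending = false;
	return 1;                          /* 1: re-enter the guest */
}

static int toy_emulate(struct toy_vcpu *vcpu)
{
	/* Pretend the decoded instruction was a load from an MMIO address. */
	vcpu->mmio_pending = true;
	vcpu->complete_userspace_io = complete_mmio_read;
	return 0;                          /* 0: exit to userspace first */
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	if (!toy_emulate(&vcpu)) {
		/* "Userspace" services the MMIO exit ... */
		vcpu.mmio_value = 0xfee0;
		/* ... and on the next run the completion callback finishes the insn. */
		vcpu.complete_userspace_io(&vcpu);
	}
	return 0;
}

In the real code, complete_emulated_mmio() and complete_emulated_pio() play the role of the completion callback and re-enter the emulator to finish the interrupted instruction.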

◆ x86_exception_has_error_code()

static bool x86_exception_has_error_code (unsigned int vector)
inline static

Definition at line 174 of file x86.h.

175 {
176  static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
177  BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
178  BIT(PF_VECTOR) | BIT(AC_VECTOR);
179 
180  return (1U << vector) & exception_has_error_code;
181 }
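
Because the set of vectors that push an error code is fixed by the architecture, a static bitmask makes the test a single AND; it is useful wherever KVM needs to know whether an error code must accompany the vector being injected or emulated. A stand-alone restatement of the same check with the architectural vector numbers (purely illustrative, outside the kernel):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Architectural vector numbers; DF/TS/NP/SS/GP/PF/AC push an error code. */
enum { DB_VECTOR = 1, UD_VECTOR = 6, DF_VECTOR = 8, TS_VECTOR = 10,
       NP_VECTOR = 11, SS_VECTOR = 12, GP_VECTOR = 13, PF_VECTOR = 14,
       AC_VECTOR = 17 };

static bool has_error_code(unsigned int vector)
{
	static const unsigned int mask = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
		BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
		BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & mask;
}

int main(void)
{
	printf("#GP has error code: %d\n", has_error_code(GP_VECTOR)); /* 1 */
	printf("#UD has error code: %d\n", has_error_code(UD_VECTOR)); /* 0 */
	printf("#DB has error code: %d\n", has_error_code(DB_VECTOR)); /* 0 */
	return 0;
}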

Variable Documentation

◆ eager_page_split

bool eager_page_split
extern

Definition at line 196 of file x86.c.

◆ enable_pmu

bool enable_pmu
extern

Definition at line 192 of file x86.c.

◆ enable_vmware_backdoor

bool enable_vmware_backdoor
extern

Definition at line 176 of file x86.c.

◆ host_arch_capabilities

u64 host_arch_capabilities
extern

Definition at line 241 of file x86.c.

◆ host_xcr0

u64 host_xcr0
extern

Definition at line 317 of file x86.c.

◆ host_xss

u64 host_xss
extern

Definition at line 238 of file x86.c.

◆ kvm_caps

struct kvm_caps kvm_caps
extern

◆ min_timer_period_us

unsigned int min_timer_period_us
extern

Definition at line 154 of file x86.c.

◆ pi_inject_timer

int pi_inject_timer
extern

Definition at line 188 of file x86.c.

◆ report_ignored_msrs

bool report_ignored_msrs
extern

Definition at line 150 of file x86.c.