KVM
mmu.h File Reference
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"


Macros

#define PT_WRITABLE_SHIFT   1
 
#define PT_USER_SHIFT   2
 
#define PT_PRESENT_MASK   (1ULL << 0)
 
#define PT_WRITABLE_MASK   (1ULL << PT_WRITABLE_SHIFT)
 
#define PT_USER_MASK   (1ULL << PT_USER_SHIFT)
 
#define PT_PWT_MASK   (1ULL << 3)
 
#define PT_PCD_MASK   (1ULL << 4)
 
#define PT_ACCESSED_SHIFT   5
 
#define PT_ACCESSED_MASK   (1ULL << PT_ACCESSED_SHIFT)
 
#define PT_DIRTY_SHIFT   6
 
#define PT_DIRTY_MASK   (1ULL << PT_DIRTY_SHIFT)
 
#define PT_PAGE_SIZE_SHIFT   7
 
#define PT_PAGE_SIZE_MASK   (1ULL << PT_PAGE_SIZE_SHIFT)
 
#define PT_PAT_MASK   (1ULL << 7)
 
#define PT_GLOBAL_MASK   (1ULL << 8)
 
#define PT64_NX_SHIFT   63
 
#define PT64_NX_MASK   (1ULL << PT64_NX_SHIFT)
 
#define PT_PAT_SHIFT   7
 
#define PT_DIR_PAT_SHIFT   12
 
#define PT_DIR_PAT_MASK   (1ULL << PT_DIR_PAT_SHIFT)
 
#define PT64_ROOT_5LEVEL   5
 
#define PT64_ROOT_4LEVEL   4
 
#define PT32_ROOT_LEVEL   2
 
#define PT32E_ROOT_LEVEL   3
 
#define KVM_MMU_CR4_ROLE_BITS
 
#define KVM_MMU_CR0_ROLE_BITS   (X86_CR0_PG | X86_CR0_WP)
 
#define KVM_MMU_EFER_ROLE_BITS   (EFER_LME | EFER_NX)
 
#define tdp_mmu_enabled   false
 

Functions

static __always_inline u64 rsvd_bits (int s, int e)
 
static gfn_t kvm_mmu_max_gfn (void)
 
static u8 kvm_get_shadow_phys_bits (void)
 
void kvm_mmu_set_mmio_spte_mask (u64 mmio_value, u64 mmio_mask, u64 access_mask)
 
void kvm_mmu_set_me_spte_mask (u64 me_value, u64 me_mask)
 
void kvm_mmu_set_ept_masks (bool has_ad_bits, bool has_exec_only)
 
void kvm_init_mmu (struct kvm_vcpu *vcpu)
 
void kvm_init_shadow_npt_mmu (struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3)
 
void kvm_init_shadow_ept_mmu (struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp)
 
bool kvm_can_do_async_pf (struct kvm_vcpu *vcpu)
 
int kvm_handle_page_fault (struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len)
 
void __kvm_mmu_refresh_passthrough_bits (struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 
int kvm_mmu_load (struct kvm_vcpu *vcpu)
 
void kvm_mmu_unload (struct kvm_vcpu *vcpu)
 
void kvm_mmu_free_obsolete_roots (struct kvm_vcpu *vcpu)
 
void kvm_mmu_sync_roots (struct kvm_vcpu *vcpu)
 
void kvm_mmu_sync_prev_roots (struct kvm_vcpu *vcpu)
 
void kvm_mmu_track_write (struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes)
 
static int kvm_mmu_reload (struct kvm_vcpu *vcpu)
 
static unsigned long kvm_get_pcid (struct kvm_vcpu *vcpu, gpa_t cr3)
 
static unsigned long kvm_get_active_pcid (struct kvm_vcpu *vcpu)
 
static unsigned long kvm_get_active_cr3_lam_bits (struct kvm_vcpu *vcpu)
 
static void kvm_mmu_load_pgd (struct kvm_vcpu *vcpu)
 
static void kvm_mmu_refresh_passthrough_bits (struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 
static u8 permission_fault (struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, u64 access)
 
bool __kvm_mmu_honors_guest_mtrrs (bool vm_has_noncoherent_dma)
 
static bool kvm_mmu_honors_guest_mtrrs (struct kvm *kvm)
 
void kvm_zap_gfn_range (struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
int kvm_arch_write_log_dirty (struct kvm_vcpu *vcpu)
 
int kvm_mmu_post_init_vm (struct kvm *kvm)
 
void kvm_mmu_pre_destroy_vm (struct kvm *kvm)
 
static bool kvm_shadow_root_allocated (struct kvm *kvm)
 
static bool kvm_memslots_have_rmaps (struct kvm *kvm)
 
static gfn_t gfn_to_index (gfn_t gfn, gfn_t base_gfn, int level)
 
static unsigned long __kvm_mmu_slot_lpages (struct kvm_memory_slot *slot, unsigned long npages, int level)
 
static unsigned long kvm_mmu_slot_lpages (struct kvm_memory_slot *slot, int level)
 
static void kvm_update_page_stats (struct kvm *kvm, int level, int count)
 
gpa_t translate_nested_gpa (struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, struct x86_exception *exception)
 
static gpa_t kvm_translate_gpa (struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gpa, u64 access, struct x86_exception *exception)
 

Variables

bool __read_mostly enable_mmio_caching
 
u8 __read_mostly shadow_phys_bits
 

Macro Definition Documentation

◆ KVM_MMU_CR0_ROLE_BITS

#define KVM_MMU_CR0_ROLE_BITS   (X86_CR0_PG | X86_CR0_WP)

Definition at line 42 of file mmu.h.

◆ KVM_MMU_CR4_ROLE_BITS

#define KVM_MMU_CR4_ROLE_BITS
Value:
(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

Definition at line 39 of file mmu.h.

◆ KVM_MMU_EFER_ROLE_BITS

#define KVM_MMU_EFER_ROLE_BITS   (EFER_LME | EFER_NX)

Definition at line 43 of file mmu.h.

◆ PT32_ROOT_LEVEL

#define PT32_ROOT_LEVEL   2

Definition at line 36 of file mmu.h.

◆ PT32E_ROOT_LEVEL

#define PT32E_ROOT_LEVEL   3

Definition at line 37 of file mmu.h.

◆ PT64_NX_MASK

#define PT64_NX_MASK   (1ULL << PT64_NX_SHIFT)

Definition at line 28 of file mmu.h.

◆ PT64_NX_SHIFT

#define PT64_NX_SHIFT   63

Definition at line 27 of file mmu.h.

◆ PT64_ROOT_4LEVEL

#define PT64_ROOT_4LEVEL   4

Definition at line 35 of file mmu.h.

◆ PT64_ROOT_5LEVEL

#define PT64_ROOT_5LEVEL   5

Definition at line 34 of file mmu.h.

◆ PT_ACCESSED_MASK

#define PT_ACCESSED_MASK   (1ULL << PT_ACCESSED_SHIFT)

Definition at line 20 of file mmu.h.

◆ PT_ACCESSED_SHIFT

#define PT_ACCESSED_SHIFT   5

Definition at line 19 of file mmu.h.

◆ PT_DIR_PAT_MASK

#define PT_DIR_PAT_MASK   (1ULL << PT_DIR_PAT_SHIFT)

Definition at line 32 of file mmu.h.

◆ PT_DIR_PAT_SHIFT

#define PT_DIR_PAT_SHIFT   12

Definition at line 31 of file mmu.h.

◆ PT_DIRTY_MASK

#define PT_DIRTY_MASK   (1ULL << PT_DIRTY_SHIFT)

Definition at line 22 of file mmu.h.

◆ PT_DIRTY_SHIFT

#define PT_DIRTY_SHIFT   6

Definition at line 21 of file mmu.h.

◆ PT_GLOBAL_MASK

#define PT_GLOBAL_MASK   (1ULL << 8)

Definition at line 26 of file mmu.h.

◆ PT_PAGE_SIZE_MASK

#define PT_PAGE_SIZE_MASK   (1ULL << PT_PAGE_SIZE_SHIFT)

Definition at line 24 of file mmu.h.

◆ PT_PAGE_SIZE_SHIFT

#define PT_PAGE_SIZE_SHIFT   7

Definition at line 23 of file mmu.h.

◆ PT_PAT_MASK

#define PT_PAT_MASK   (1ULL << 7)

Definition at line 25 of file mmu.h.

◆ PT_PAT_SHIFT

#define PT_PAT_SHIFT   7

Definition at line 30 of file mmu.h.

◆ PT_PCD_MASK

#define PT_PCD_MASK   (1ULL << 4)

Definition at line 18 of file mmu.h.

◆ PT_PRESENT_MASK

#define PT_PRESENT_MASK   (1ULL << 0)

Definition at line 14 of file mmu.h.

◆ PT_PWT_MASK

#define PT_PWT_MASK   (1ULL << 3)

Definition at line 17 of file mmu.h.

◆ PT_USER_MASK

#define PT_USER_MASK   (1ULL << PT_USER_SHIFT)

Definition at line 16 of file mmu.h.

◆ PT_USER_SHIFT

#define PT_USER_SHIFT   2

Definition at line 12 of file mmu.h.

◆ PT_WRITABLE_MASK

#define PT_WRITABLE_MASK   (1ULL << PT_WRITABLE_SHIFT)

Definition at line 15 of file mmu.h.

◆ PT_WRITABLE_SHIFT

#define PT_WRITABLE_SHIFT   1

Definition at line 11 of file mmu.h.
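
The PT_* definitions above mirror the architectural bit layout of an x86 page-table entry, so they can be OR'ed together to build or decode a raw 64-bit PTE. A minimal userspace sketch (the mask values are copied from the macro list above; the program itself is illustrative and not part of KVM):

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK    (1ULL << 0)
#define PT_WRITABLE_MASK   (1ULL << 1)
#define PT_USER_MASK       (1ULL << 2)
#define PT_PAGE_SIZE_MASK  (1ULL << 7)
#define PT64_NX_MASK       (1ULL << 63)

int main(void)
{
	/* A present, writable, user-accessible huge page with execute disabled. */
	uint64_t pte = PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK |
		       PT_PAGE_SIZE_MASK | PT64_NX_MASK;

	printf("present=%d writable=%d user=%d huge=%d nx=%d\n",
	       !!(pte & PT_PRESENT_MASK), !!(pte & PT_WRITABLE_MASK),
	       !!(pte & PT_USER_MASK), !!(pte & PT_PAGE_SIZE_MASK),
	       !!(pte & PT64_NX_MASK));
	return 0;
}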

◆ tdp_mmu_enabled

#define tdp_mmu_enabled   false

Definition at line 276 of file mmu.h.

Function Documentation

◆ __kvm_mmu_honors_guest_mtrrs()

bool __kvm_mmu_honors_guest_mtrrs ( bool  vm_has_noncoherent_dma)

Definition at line 4609 of file mmu.c.

4610 {
4611  /*
4612  * If host MTRRs are ignored (shadow_memtype_mask is non-zero), and the
4613  * VM has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is
4614  * to honor the memtype from the guest's MTRRs so that guest accesses
4615  * to memory that is DMA'd aren't cached against the guest's wishes.
4616  *
4617  * Note, KVM may still ultimately ignore guest MTRRs for certain PFNs,
4618  * e.g. KVM will force UC memtype for host MMIO.
4619  */
4620  return vm_has_noncoherent_dma && shadow_memtype_mask;
4621 }
u64 __read_mostly shadow_memtype_mask
Definition: spte.c:38

◆ __kvm_mmu_refresh_passthrough_bits()

void __kvm_mmu_refresh_passthrough_bits ( struct kvm_vcpu *  vcpu,
struct kvm_mmu *  mmu 
)

Definition at line 5284 of file mmu.c.

5286 {
5287  const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
5288 
5289  BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
 5290  BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
 5291 
5292  if (is_cr0_wp(mmu) == cr0_wp)
5293  return;
5294 
5295  mmu->cpu_role.base.cr0_wp = cr0_wp;
5296  reset_guest_paging_metadata(vcpu, mmu);
5297 }
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit)
#define KVM_POSSIBLE_CR4_GUEST_BITS
Definition: kvm_cache_regs.h:8
#define KVM_POSSIBLE_CR0_GUEST_BITS
Definition: kvm_cache_regs.h:7
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
Definition: mmu.c:5219
#define KVM_MMU_CR0_ROLE_BITS
Definition: mmu.h:42
#define KVM_MMU_CR4_ROLE_BITS
Definition: mmu.h:39

◆ __kvm_mmu_slot_lpages()

static unsigned long __kvm_mmu_slot_lpages ( struct kvm_memory_slot *  slot,
unsigned long  npages,
int  level 
)
inlinestatic

Definition at line 292 of file mmu.h.

294 {
295  return gfn_to_index(slot->base_gfn + npages - 1,
296  slot->base_gfn, level) + 1;
297 }
static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
Definition: mmu.h:284

◆ gfn_to_index()

static gfn_t gfn_to_index ( gfn_t  gfn,
gfn_t  base_gfn,
int  level 
)
inlinestatic

Definition at line 284 of file mmu.h.

285 {
286  /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
287  return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
288  (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
289 }
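
Worked example (assuming the usual x86 value KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) == 9, i.e. 512 4KiB GFNs per 2MiB page): for a memslot with base_gfn = 0x1000 and npages = 0x800, __kvm_mmu_slot_lpages() at the 2MiB level evaluates to gfn_to_index(0x17ff, 0x1000, PG_LEVEL_2M) + 1 = (0xb - 0x8) + 1 = 4, i.e. the slot spans four possible 2MiB mappings.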

◆ kvm_arch_write_log_dirty()

int kvm_arch_write_log_dirty ( struct kvm_vcpu *  vcpu)

◆ kvm_can_do_async_pf()

bool kvm_can_do_async_pf ( struct kvm_vcpu *  vcpu)

Definition at line 13317 of file x86.c.

13318 {
13319  if (unlikely(!lapic_in_kernel(vcpu) ||
13320  kvm_event_needs_reinjection(vcpu) ||
13321  kvm_is_exception_pending(vcpu)))
13322  return false;
13323 
13324  if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13325  return false;
13326 
13327  /*
13328  * If interrupts are off we cannot even use an artificial
13329  * halt state.
13330  */
13331  return kvm_arch_interrupt_allowed(vcpu);
13332 }
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
Definition: lapic.h:186
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
Definition: x86.c:13146
static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
Definition: x86.c:13291
static bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
Definition: x86.h:127
static bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
Definition: x86.h:100
static bool kvm_hlt_in_guest(struct kvm *kvm)
Definition: x86.h:414

◆ kvm_get_active_cr3_lam_bits()

static unsigned long kvm_get_active_cr3_lam_bits ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 149 of file mmu.h.

150 {
151  if (!guest_can_use(vcpu, X86_FEATURE_LAM))
152  return 0;
153 
154  return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
155 }
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:278
static ulong kvm_read_cr3(struct kvm_vcpu *vcpu)

◆ kvm_get_active_pcid()

static unsigned long kvm_get_active_pcid ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 144 of file mmu.h.

145 {
146  return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
147 }
static unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
Definition: mmu.h:135

◆ kvm_get_pcid()

static unsigned long kvm_get_pcid ( struct kvm_vcpu *  vcpu,
gpa_t  cr3 
)
inlinestatic

Definition at line 135 of file mmu.h.

136 {
137  BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
138 
139  return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
140  ? cr3 & X86_CR3_PCID_MASK
141  : 0;
142 }
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
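
For example, with CR4.PCIDE set and a guest CR3 of 0x0000000012345007, the function returns PCID 0x007 (X86_CR3_PCID_MASK covers CR3 bits 11:0); with CR4.PCIDE clear it returns 0, matching the architectural rule that PCID 0 is in use whenever PCIDs are disabled.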

◆ kvm_get_shadow_phys_bits()

static u8 kvm_get_shadow_phys_bits ( void  )
inlinestatic

Definition at line 84 of file mmu.h.

85 {
86  /*
87  * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
88  * in CPU detection code, but the processor treats those reduced bits as
89  * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
90  * the physical address bits reported by CPUID.
91  */
92  if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
93  return cpuid_eax(0x80000008) & 0xff;
94 
95  /*
96  * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
97  * custom CPUID. Proceed with whatever the kernel found since these features
98  * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
99  */
100  return boot_cpu_data.x86_phys_bits;
101 }
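
For example, on a host whose CPUID.80000008H:EAX reads 0x00003028, the function returns the low byte 0x28, i.e. 40 physical address bits; only when the 0x80000008 leaf is unavailable does it fall back to boot_cpu_data.x86_phys_bits.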

◆ kvm_handle_page_fault()

int kvm_handle_page_fault ( struct kvm_vcpu *  vcpu,
u64  error_code,
u64  fault_address,
char *  insn,
int  insn_len 
)

Definition at line 4540 of file mmu.c.

4542 {
4543  int r = 1;
4544  u32 flags = vcpu->arch.apf.host_apf_flags;
4545 
4546 #ifndef CONFIG_X86_64
4547  /* A 64-bit CR2 should be impossible on 32-bit KVM. */
4548  if (WARN_ON_ONCE(fault_address >> 32))
4549  return -EFAULT;
4550 #endif
4551 
4552  vcpu->arch.l1tf_flush_l1d = true;
4553  if (!flags) {
4554  trace_kvm_page_fault(vcpu, fault_address, error_code);
4555 
4556  if (kvm_event_needs_reinjection(vcpu))
4557  kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4558  r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4559  insn_len);
4560  } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4561  vcpu->arch.apf.host_apf_flags = 0;
4562  local_irq_disable();
4563  kvm_async_pf_task_wait_schedule(fault_address);
4564  local_irq_enable();
4565  } else {
4566  WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4567  }
4568 
4569  return r;
4570 }
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len)
Definition: mmu.c:5830
static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
Definition: mmu.c:2775

◆ kvm_init_mmu()

void kvm_init_mmu ( struct kvm_vcpu *  vcpu)

Definition at line 5538 of file mmu.c.

5539 {
5540  struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5541  union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5542 
5543  if (mmu_is_nested(vcpu))
5544  init_kvm_nested_mmu(vcpu, cpu_role);
5545  else if (tdp_enabled)
5546  init_kvm_tdp_mmu(vcpu, cpu_role);
5547  else
5548  init_kvm_softmmu(vcpu, cpu_role);
5549 }
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
Definition: mmu.c:5331
static void init_kvm_softmmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
Definition: mmu.c:5487
static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
Definition: mmu.c:5244
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role new_mode)
Definition: mmu.c:5499
bool tdp_enabled
Definition: mmu.c:106
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
Definition: mmu.c:247
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
Definition: x86.h:183

◆ kvm_init_shadow_ept_mmu()

void kvm_init_shadow_ept_mmu ( struct kvm_vcpu *  vcpu,
bool  execonly,
int  huge_page_level,
bool  accessed_dirty,
gpa_t  new_eptp 
)

Definition at line 5458 of file mmu.c.

5461 {
5462  struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5463  u8 level = vmx_eptp_page_walk_level(new_eptp);
5464  union kvm_cpu_role new_mode =
5465  kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5466  execonly, level);
5467 
5468  if (new_mode.as_u64 != context->cpu_role.as_u64) {
5469  /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
5470  context->cpu_role.as_u64 = new_mode.as_u64;
5471  context->root_role.word = new_mode.base.word;
5472 
5473  context->page_fault = ept_page_fault;
5474  context->gva_to_gpa = ept_gva_to_gpa;
5475  context->sync_spte = ept_sync_spte;
5476 
5477  update_permission_bitmask(context, true);
5478  context->pkru_mask = 0;
5479  reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5480  reset_ept_shadow_zero_bits_mask(context, execonly);
5481  }
5482 
5483  kvm_mmu_new_pgd(vcpu, new_eptp);
5484 }
static void reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
Definition: mmu.c:5062
static union kvm_cpu_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly, u8 level)
Definition: mmu.c:5434
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
Definition: mmu.c:5079
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
Definition: mmu.c:4753
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly, int huge_page_level)
Definition: mmu.c:4966

◆ kvm_init_shadow_npt_mmu()

void kvm_init_shadow_npt_mmu ( struct kvm_vcpu *  vcpu,
unsigned long  cr0,
unsigned long  cr4,
u64  efer,
gpa_t  nested_cr3 
)

Definition at line 5407 of file mmu.c.

5409 {
5410  struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5411  struct kvm_mmu_role_regs regs = {
5412  .cr0 = cr0,
5413  .cr4 = cr4 & ~X86_CR4_PKE,
5414  .efer = efer,
5415  };
5416  union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
5417  union kvm_mmu_page_role root_role;
5418 
5419  /* NPT requires CR0.PG=1. */
5420  WARN_ON_ONCE(cpu_role.base.direct);
5421 
5422  root_role = cpu_role.base;
5423  root_role.level = kvm_mmu_get_tdp_level(vcpu);
5424  if (root_role.level == PT64_ROOT_5LEVEL &&
5425  cpu_role.base.level == PT64_ROOT_4LEVEL)
5426  root_role.passthrough = 1;
5427 
5428  shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5429  kvm_mmu_new_pgd(vcpu, nested_cr3);
5430 }
static int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
Definition: mmu.c:5299
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, union kvm_cpu_role cpu_role, union kvm_mmu_page_role root_role)
Definition: mmu.c:5360
#define PT64_ROOT_5LEVEL
Definition: mmu.h:34
#define PT64_ROOT_4LEVEL
Definition: mmu.h:35
const unsigned long cr4
Definition: mmu.c:188
const u64 efer
Definition: mmu.c:189
const unsigned long cr0
Definition: mmu.c:187

◆ kvm_memslots_have_rmaps()

static bool kvm_memslots_have_rmaps ( struct kvm *  kvm)
inlinestatic

Definition at line 279 of file mmu.h.

280 {
 281  return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
 282 }
#define tdp_mmu_enabled
Definition: mmu.h:276
static bool kvm_shadow_root_allocated(struct kvm *kvm)
Definition: mmu.h:262

◆ kvm_mmu_free_obsolete_roots()

void kvm_mmu_free_obsolete_roots ( struct kvm_vcpu *  vcpu)

Definition at line 5676 of file mmu.c.

5677 {
5678  __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5679  __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5680 }
static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
Definition: mmu.c:5659

◆ kvm_mmu_honors_guest_mtrrs()

static bool kvm_mmu_honors_guest_mtrrs ( struct kvm *  kvm)
inlinestatic

Definition at line 250 of file mmu.h.

251 {
 252  return __kvm_mmu_honors_guest_mtrrs(kvm_arch_has_noncoherent_dma(kvm));
 253 }
bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
Definition: mmu.c:4609
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
Definition: x86.c:13452

◆ kvm_mmu_load()

int kvm_mmu_load ( struct kvm_vcpu *  vcpu)

Definition at line 5588 of file mmu.c.

5589 {
5590  int r;
5591 
5592  r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5593  if (r)
5594  goto out;
5595  r = mmu_alloc_special_roots(vcpu);
5596  if (r)
5597  goto out;
5598  if (vcpu->arch.mmu->root_role.direct)
5599  r = mmu_alloc_direct_roots(vcpu);
5600  else
5601  r = mmu_alloc_shadow_roots(vcpu);
5602  if (r)
5603  goto out;
5604 
5605  kvm_mmu_sync_roots(vcpu);
5606 
5607  kvm_mmu_load_pgd(vcpu);
5608 
5609  /*
5610  * Flush any TLB entries for the new root, the provenance of the root
5611  * is unknown. Even if KVM ensures there are no stale TLB entries
5612  * for a freed root, in theory another hypervisor could have left
5613  * stale entries. Flushing on alloc also allows KVM to skip the TLB
5614  * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5615  */
5616  static_call(kvm_x86_flush_tlb_current)(vcpu);
5617 out:
5618  return r;
5619 }
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
Definition: mmu.c:3688
static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
Definition: mmu.c:3914
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
Definition: mmu.c:679
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
Definition: mmu.c:3796
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
Definition: mmu.c:4021
static void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
Definition: mmu.h:157

◆ kvm_mmu_load_pgd()

static void kvm_mmu_load_pgd ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 157 of file mmu.h.

158 {
159  u64 root_hpa = vcpu->arch.mmu->root.hpa;
160 
161  if (!VALID_PAGE(root_hpa))
162  return;
163 
164  static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
165  vcpu->arch.mmu->root_role.level);
166 }

◆ kvm_mmu_max_gfn()

static gfn_t kvm_mmu_max_gfn ( void  )
inlinestatic

Definition at line 66 of file mmu.h.

67 {
68  /*
69  * Note that this uses the host MAXPHYADDR, not the guest's.
70  * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
71  * assuming KVM is running on bare metal, guest accesses beyond
72  * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
73  * (either EPT Violation/Misconfig or #NPF), and so KVM will never
74  * install a SPTE for such addresses. If KVM is running as a VM
75  * itself, on the other hand, it might see a MAXPHYADDR that is less
76  * than hardware's real MAXPHYADDR. Using the host MAXPHYADDR
77  * disallows such SPTEs entirely and simplifies the TDP MMU.
78  */
79  int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;
80 
81  return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
82 }
u8 __read_mostly shadow_phys_bits
Definition: spte.c:46
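
Worked example: with TDP enabled on a host where shadow_phys_bits is 46 and PAGE_SHIFT is 12, the maximum GFN is (1ULL << 34) - 1 = 0x3ffffffff, i.e. the last 4KiB frame below the 64TiB host physical-address limit; with shadow paging the cap is instead derived from the architectural 52-bit limit.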

◆ kvm_mmu_post_init_vm()

int kvm_mmu_post_init_vm ( struct kvm *  kvm)

Definition at line 7279 of file mmu.c.

7280 {
7281  int err;
7282 
 7283  if (nx_hugepage_mitigation_hard_disabled)
 7284  return 0;
 7285 
 7286  err = kvm_vm_create_worker_thread(kvm, kvm_nx_huge_page_recovery_worker, 0,
 7287  "kvm-nx-lpage-recovery",
7288  &kvm->arch.nx_huge_page_recovery_thread);
7289  if (!err)
7290  kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7291 
7292  return err;
7293 }
int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, uintptr_t data, const char *name, struct task_struct **thread_ptr)
Definition: kvm_main.c:6593
static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
Definition: mmu.c:7254
static bool nx_hugepage_mitigation_hard_disabled
Definition: mmu.c:62

◆ kvm_mmu_pre_destroy_vm()

void kvm_mmu_pre_destroy_vm ( struct kvm *  kvm)

Definition at line 7295 of file mmu.c.

7296 {
7297  if (kvm->arch.nx_huge_page_recovery_thread)
7298  kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
7299 }

◆ kvm_mmu_refresh_passthrough_bits()

static void kvm_mmu_refresh_passthrough_bits ( struct kvm_vcpu *  vcpu,
struct kvm_mmu *  mmu 
)
inlinestatic

Definition at line 168 of file mmu.h.

170 {
171  /*
172  * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
173  * @mmu's snapshot of CR0.WP and thus all related paging metadata may
174  * be stale. Refresh CR0.WP and the metadata on-demand when checking
175  * for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing
176  * nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does
177  * need to refresh nested_mmu, a.k.a. the walker used to translate L2
178  * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
179  */
180  if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
181  return;
182 
 183  __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
 184 }
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
Definition: mmu.c:5284

◆ kvm_mmu_reload()

static int kvm_mmu_reload ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 127 of file mmu.h.

128 {
129  if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
130  return 0;
131 
132  return kvm_mmu_load(vcpu);
133 }
int kvm_mmu_load(struct kvm_vcpu *vcpu)
Definition: mmu.c:5588

◆ kvm_mmu_set_ept_masks()

void kvm_mmu_set_ept_masks ( bool  has_ad_bits,
bool  has_exec_only 
)

Definition at line 425 of file spte.c.

426 {
427  shadow_user_mask = VMX_EPT_READABLE_MASK;
428  shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
429  shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
430  shadow_nx_mask = 0ull;
431  shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
432  shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
433  /*
434  * EPT overrides the host MTRRs, and so KVM must program the desired
435  * memtype directly into the SPTEs. Note, this mask is just the mask
436  * of all bits that factor into the memtype, the actual memtype must be
437  * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
438  */
439  shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
440  shadow_acc_track_mask = VMX_EPT_RWX_MASK;
 441  shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
 442  shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
 443 
444  /*
445  * EPT Misconfigurations are generated if the value of bits 2:0
446  * of an EPT paging-structure entry is 110b (write/execute).
447  */
448  kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
449  VMX_EPT_RWX_MASK, 0);
450 }
u64 __read_mostly shadow_accessed_mask
Definition: spte.c:32
u64 __read_mostly shadow_acc_track_mask
Definition: spte.c:41
u64 __read_mostly shadow_host_writable_mask
Definition: spte.c:27
u64 __read_mostly shadow_dirty_mask
Definition: spte.c:33
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
Definition: spte.c:362
u64 __read_mostly shadow_user_mask
Definition: spte.c:31
u64 __read_mostly shadow_nx_mask
Definition: spte.c:29
u64 __read_mostly shadow_x_mask
Definition: spte.c:30
u64 __read_mostly shadow_mmu_writable_mask
Definition: spte.c:28
u64 __read_mostly shadow_present_mask
Definition: spte.c:37
#define EPT_SPTE_MMU_WRITABLE
Definition: spte.h:89
#define EPT_SPTE_HOST_WRITABLE
Definition: spte.h:88

◆ kvm_mmu_set_me_spte_mask()

void kvm_mmu_set_me_spte_mask ( u64  me_value,
u64  me_mask 
)

Definition at line 414 of file spte.c.

415 {
416  /* shadow_me_value must be a subset of shadow_me_mask */
417  if (WARN_ON(me_value & ~me_mask))
418  me_value = me_mask = 0;
419 
420  shadow_me_value = me_value;
421  shadow_me_mask = me_mask;
422 }
u64 __read_mostly shadow_me_value
Definition: spte.c:39
u64 __read_mostly shadow_me_mask
Definition: spte.c:40

◆ kvm_mmu_set_mmio_spte_mask()

void kvm_mmu_set_mmio_spte_mask ( u64  mmio_value,
u64  mmio_mask,
u64  access_mask 
)

Definition at line 362 of file spte.c.

363 {
364  BUG_ON((u64)(unsigned)access_mask != access_mask);
365  WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
366 
367  /*
368  * Reset to the original module param value to honor userspace's desire
369  * to (dis)allow MMIO caching. Update the param itself so that
370  * userspace can see whether or not KVM is actually using MMIO caching.
371  */
 372  enable_mmio_caching = allow_mmio_caching;
 373  if (!enable_mmio_caching)
374  mmio_value = 0;
375 
376  /*
377  * The mask must contain only bits that are carved out specifically for
378  * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
379  * generation.
380  */
381  if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
382  mmio_value = 0;
383 
384  /*
385  * Disable MMIO caching if the MMIO value collides with the bits that
386  * are used to hold the relocated GFN when the L1TF mitigation is
387  * enabled. This should never fire as there is no known hardware that
388  * can trigger this condition, e.g. SME/SEV CPUs that require a custom
389  * MMIO value are not susceptible to L1TF.
390  */
391  if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
 392  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
 393  mmio_value = 0;
394 
395  /*
396  * The masked MMIO value must obviously match itself and a removed SPTE
397  * must not get a false positive. Removed SPTEs and MMIO SPTEs should
398  * never collide as MMIO must set some RWX bits, and removed SPTEs must
399  * not set any RWX bits.
400  */
401  if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
402  WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
403  mmio_value = 0;
404 
405  if (!mmio_value)
406  enable_mmio_caching = false;
407 
408  shadow_mmio_value = mmio_value;
409  shadow_mmio_mask = mmio_mask;
410  shadow_mmio_access_mask = access_mask;
411 }
u64 __read_mostly shadow_nonpresent_or_rsvd_mask
Definition: spte.c:43
static bool __ro_after_init allow_mmio_caching
Definition: spte.c:23
u64 __read_mostly shadow_mmio_access_mask
Definition: spte.c:36
u64 __read_mostly shadow_mmio_mask
Definition: spte.c:35
bool __read_mostly enable_mmio_caching
Definition: spte.c:22
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask
Definition: spte.c:44
u64 __read_mostly shadow_mmio_value
Definition: spte.c:34
#define SPTE_MMIO_ALLOWED_MASK
Definition: spte.h:137
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
Definition: spte.h:183
#define REMOVED_SPTE
Definition: spte.h:197

◆ kvm_mmu_slot_lpages()

static unsigned long kvm_mmu_slot_lpages ( struct kvm_memory_slot *  slot,
int  level 
)
inlinestatic

Definition at line 300 of file mmu.h.

301 {
302  return __kvm_mmu_slot_lpages(slot, slot->npages, level);
303 }
static unsigned long __kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages, int level)
Definition: mmu.h:292

◆ kvm_mmu_sync_prev_roots()

void kvm_mmu_sync_prev_roots ( struct kvm_vcpu *  vcpu)

Definition at line 4062 of file mmu.c.

4063 {
4064  unsigned long roots_to_free = 0;
4065  int i;
4066 
4067  for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4068  if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4069  roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4070 
4071  /* sync prev_roots by simply freeing them */
4072  kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4073 }
static bool is_unsync_root(hpa_t root)
Definition: mmu.c:3986
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free)
Definition: mmu.c:3587

◆ kvm_mmu_sync_roots()

void kvm_mmu_sync_roots ( struct kvm_vcpu *  vcpu)

Definition at line 4021 of file mmu.c.

4022 {
4023  int i;
4024  struct kvm_mmu_page *sp;
4025 
4026  if (vcpu->arch.mmu->root_role.direct)
4027  return;
4028 
4029  if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4030  return;
4031 
 4032  vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 4033 
4034  if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4035  hpa_t root = vcpu->arch.mmu->root.hpa;
4036 
4037  if (!is_unsync_root(root))
4038  return;
4039 
4040  sp = root_to_sp(root);
4041 
4042  write_lock(&vcpu->kvm->mmu_lock);
4043  mmu_sync_children(vcpu, sp, true);
4044  write_unlock(&vcpu->kvm->mmu_lock);
4045  return;
4046  }
4047 
4048  write_lock(&vcpu->kvm->mmu_lock);
4049 
4050  for (i = 0; i < 4; ++i) {
4051  hpa_t root = vcpu->arch.mmu->pae_root[i];
4052 
4053  if (IS_VALID_PAE_ROOT(root)) {
4054  sp = spte_to_child_sp(root);
4055  mmu_sync_children(vcpu, sp, true);
4056  }
4057  }
4058 
4059  write_unlock(&vcpu->kvm->mmu_lock);
4060 }
static int mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent, bool can_yield)
Definition: mmu.c:2093
#define IS_VALID_PAE_ROOT(x)
Definition: mmu_internal.h:38
static struct kvm_mmu_page * root_to_sp(hpa_t root)
Definition: spte.h:240
static struct kvm_mmu_page * spte_to_child_sp(u64 spte)
Definition: spte.h:230
static void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
Definition: x86.h:247
#define MMIO_GVA_ANY
Definition: x86.h:245

◆ kvm_mmu_track_write()

void kvm_mmu_track_write ( struct kvm_vcpu *  vcpu,
gpa_t  gpa,
const u8 *  new,
int  bytes 
)

Definition at line 5781 of file mmu.c.

5783 {
5784  gfn_t gfn = gpa >> PAGE_SHIFT;
5785  struct kvm_mmu_page *sp;
5786  LIST_HEAD(invalid_list);
5787  u64 entry, gentry, *spte;
5788  int npte;
5789  bool flush = false;
5790 
5791  /*
5792  * If we don't have indirect shadow pages, it means no page is
5793  * write-protected, so we can exit simply.
5794  */
5795  if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5796  return;
5797 
5798  write_lock(&vcpu->kvm->mmu_lock);
5799 
5800  gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5801 
5802  ++vcpu->kvm->stat.mmu_pte_write;
5803 
5804  for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
5805  if (detect_write_misaligned(sp, gpa, bytes) ||
5806  detect_write_flooding(sp)) {
5807  kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5808  ++vcpu->kvm->stat.mmu_flooded;
5809  continue;
5810  }
5811 
5812  spte = get_written_sptes(sp, gpa, &npte);
5813  if (!spte)
5814  continue;
5815 
5816  while (npte--) {
5817  entry = *spte;
5818  mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5819  if (gentry && sp->role.level != PG_LEVEL_4K)
5820  ++vcpu->kvm->stat.mmu_pde_zapped;
5821  if (is_shadow_present_pte(entry))
5822  flush = true;
5823  ++spte;
5824  }
5825  }
5826  kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5827  write_unlock(&vcpu->kvm->mmu_lock);
5828 }
LIST_HEAD(vm_list)
static bool detect_write_flooding(struct kvm_mmu_page *sp)
Definition: mmu.c:5712
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list, bool remote_flush)
Definition: mmu.c:1997
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)
Definition: mmu.c:2493
#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)
Definition: mmu.c:1913
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, int bytes)
Definition: mmu.c:5729
static u64 * get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
Definition: mmu.c:5750
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, int *bytes)
Definition: mmu.c:5682
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list)
Definition: mmu.c:2634
static bool is_shadow_present_pte(u64 pte)
Definition: spte.h:258
union kvm_mmu_page_role role
Definition: mmu_internal.h:80

◆ kvm_mmu_unload()

void kvm_mmu_unload ( struct kvm_vcpu *  vcpu)

Definition at line 5621 of file mmu.c.

5622 {
5623  struct kvm *kvm = vcpu->kvm;
5624 
5625  kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5626  WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5627  kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5628  WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
 5629  vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 5630 }

◆ kvm_shadow_root_allocated()

static bool kvm_shadow_root_allocated ( struct kvm *  kvm)
inlinestatic

Definition at line 262 of file mmu.h.

263 {
264  /*
265  * Read shadow_root_allocated before related pointers. Hence, threads
266  * reading shadow_root_allocated in any lock context are guaranteed to
267  * see the pointers. Pairs with smp_store_release in
268  * mmu_first_shadow_root_alloc.
269  */
270  return smp_load_acquire(&kvm->arch.shadow_root_allocated);
271 }

◆ kvm_translate_gpa()

static gpa_t kvm_translate_gpa ( struct kvm_vcpu *  vcpu,
struct kvm_mmu *  mmu,
gpa_t  gpa,
u64  access,
struct x86_exception *  exception 
)
inlinestatic

Definition at line 313 of file mmu.h.

317 {
318  if (mmu != &vcpu->arch.nested_mmu)
319  return gpa;
320  return translate_nested_gpa(vcpu, gpa, access, exception);
321 }
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, struct x86_exception *exception)
Definition: x86.c:7468

◆ kvm_update_page_stats()

static void kvm_update_page_stats ( struct kvm *  kvm,
int  level,
int  count 
)
inlinestatic

Definition at line 305 of file mmu.h.

306 {
307  atomic64_add(count, &kvm->stat.pages[level - 1]);
308 }
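
For example, installing a new 2MiB mapping would be accounted with kvm_update_page_stats(kvm, PG_LEVEL_2M, 1), which bumps kvm->stat.pages[1]; zapping the same mapping passes a negative count to decrement it.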

◆ kvm_zap_gfn_range()

void kvm_zap_gfn_range ( struct kvm *  kvm,
gfn_t  gfn_start,
gfn_t  gfn_end 
)

Definition at line 6373 of file mmu.c.

6374 {
6375  bool flush;
6376 
6377  if (WARN_ON_ONCE(gfn_end <= gfn_start))
6378  return;
6379 
6380  write_lock(&kvm->mmu_lock);
6381 
6382  kvm_mmu_invalidate_begin(kvm);
6383 
6384  kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6385 
6386  flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
6387 
6388  if (tdp_mmu_enabled)
6389  flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
6390 
6391  if (flush)
6392  kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
6393 
6394  kvm_mmu_invalidate_end(kvm);
6395 
6396  write_unlock(&kvm->mmu_lock);
6397 }
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
Definition: kvm_main.c:367
static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
Definition: mmu.c:6338
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
Definition: tdp_mmu.c:820

◆ permission_fault()

static u8 permission_fault ( struct kvm_vcpu *  vcpu,
struct kvm_mmu *  mmu,
unsigned  pte_access,
unsigned  pte_pkey,
u64  access 
)
inlinestatic

Definition at line 194 of file mmu.h.

197 {
198  /* strip nested paging fault error codes */
199  unsigned int pfec = access;
200  unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
201 
202  /*
203  * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
204  * For implicit supervisor accesses, SMAP cannot be overridden.
205  *
206  * SMAP works on supervisor accesses only, and not_smap can
207  * be set or not set when user access with neither has any bearing
208  * on the result.
209  *
210  * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
211  * this bit will always be zero in pfec, but it will be one in index
212  * if SMAP checks are being disabled.
213  */
214  u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
215  bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
216  int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
217  u32 errcode = PFERR_PRESENT_MASK;
218  bool fault;
219 
 220  kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
 221 
222  fault = (mmu->permissions[index] >> pte_access) & 1;
223 
224  WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
225  if (unlikely(mmu->pkru_mask)) {
226  u32 pkru_bits, offset;
227 
228  /*
229  * PKRU defines 32 bits, there are 16 domains and 2
230  * attribute bits per domain in pkru. pte_pkey is the
231  * index of the protection domain, so pte_pkey * 2 is
 232  * the index of the first bit for the domain.
233  */
234  pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
235 
236  /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
237  offset = (pfec & ~1) +
238  ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
239 
240  pkru_bits &= mmu->pkru_mask >> offset;
241  errcode |= -pkru_bits & PFERR_PK_MASK;
242  fault |= (pkru_bits != 0);
243  }
244 
245  return -(u32)fault & errcode;
246 }
#define PT_USER_SHIFT
Definition: mmu.h:12
static void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
Definition: mmu.h:168
#define PT_USER_MASK
Definition: mmu.h:16
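
Worked example of the index computation (assuming the architectural page-fault error-code layout: PFERR_WRITE_MASK is bit 1, PFERR_USER_MASK is bit 2, PFERR_RSVD_BIT is 3, PFERR_FETCH_MASK is bit 4): for an explicit supervisor write with EFLAGS.AC = 1, pfec = PFERR_WRITE_MASK = 0x2 and not_smap = 1, so index = (0x2 + (1 << 3)) >> 1 = 5. The right shift drops the present bit, leaving a 4-bit index (write, user, smap-disabled, fetch) into the 16-entry mmu->permissions[] table.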

◆ rsvd_bits()

static __always_inline u64 rsvd_bits ( int  s,
int  e 
)
static

Definition at line 45 of file mmu.h.

46 {
47  BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
48 
49  if (__builtin_constant_p(e))
50  BUILD_BUG_ON(e > 63);
51  else
52  e &= 63;
53 
54  if (e < s)
55  return 0;
56 
57  return ((2ULL << (e - s)) - 1) << s;
58 }
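
A minimal userspace sketch of the same mask formula (rsvd_bits_demo is a hypothetical stand-in that drops the BUILD_BUG_ON compile-time checks and the runtime e &= 63 clamp):

#include <stdint.h>
#include <stdio.h>

/* Mask with bits s..e (inclusive) set, 0 if e < s -- same arithmetic as rsvd_bits(). */
static uint64_t rsvd_bits_demo(int s, int e)
{
	if (e < s)
		return 0;
	/* (2 << (e - s)) - 1 yields (e - s + 1) consecutive set bits. */
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* Bits 52..62: prints 0x7ff0000000000000. */
	printf("%#llx\n", (unsigned long long)rsvd_bits_demo(52, 62));
	/* A single bit: rsvd_bits_demo(63, 63) == 1ULL << 63. */
	printf("%#llx\n", (unsigned long long)rsvd_bits_demo(63, 63));
	return 0;
}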

◆ translate_nested_gpa()

gpa_t translate_nested_gpa ( struct kvm_vcpu *  vcpu,
gpa_t  gpa,
u64  access,
struct x86_exception *  exception 
)

Definition at line 7468 of file x86.c.

7470 {
7471  struct kvm_mmu *mmu = vcpu->arch.mmu;
7472  gpa_t t_gpa;
7473 
7474  BUG_ON(!mmu_is_nested(vcpu));
7475 
7476  /* NPT walks are always user-walks */
7477  access |= PFERR_USER_MASK;
7478  t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7479 
7480  return t_gpa;
7481 }

Variable Documentation

◆ enable_mmio_caching

bool __read_mostly enable_mmio_caching
extern

Definition at line 22 of file spte.c.

◆ shadow_phys_bits

u8 __read_mostly shadow_phys_bits
extern

Definition at line 46 of file spte.c.