KVM
spte.h File Reference
#include "mmu.h"
#include "mmu_internal.h"


Macros

#define SPTE_MMU_PRESENT_MASK   BIT_ULL(11)
 
#define SPTE_TDP_AD_SHIFT   52
 
#define SPTE_TDP_AD_MASK   (3ULL << SPTE_TDP_AD_SHIFT)
 
#define SPTE_TDP_AD_ENABLED   (0ULL << SPTE_TDP_AD_SHIFT)
 
#define SPTE_TDP_AD_DISABLED   (1ULL << SPTE_TDP_AD_SHIFT)
 
#define SPTE_TDP_AD_WRPROT_ONLY   (2ULL << SPTE_TDP_AD_SHIFT)
 
#define SPTE_BASE_ADDR_MASK   (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 
#define SPTE_PERM_MASK
 
#define ACC_EXEC_MASK   1
 
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
 
#define ACC_USER_MASK   PT_USER_MASK
 
#define ACC_ALL   (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
#define SPTE_EPT_READABLE_MASK   0x1ull
 
#define SPTE_EPT_EXECUTABLE_MASK   0x4ull
 
#define SPTE_LEVEL_BITS   9
 
#define SPTE_LEVEL_SHIFT(level)   __PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS)
 
#define SPTE_INDEX(address, level)   __PT_INDEX(address, level, SPTE_LEVEL_BITS)
 
#define SPTE_ENT_PER_PAGE   __PT_ENT_PER_PAGE(SPTE_LEVEL_BITS)
 
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK
 
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT   54
 
#define SHADOW_ACC_TRACK_SAVED_MASK
 
#define DEFAULT_SPTE_HOST_WRITABLE   BIT_ULL(9)
 
#define DEFAULT_SPTE_MMU_WRITABLE   BIT_ULL(10)
 
#define EPT_SPTE_HOST_WRITABLE   BIT_ULL(57)
 
#define EPT_SPTE_MMU_WRITABLE   BIT_ULL(58)
 
#define MMIO_SPTE_GEN_LOW_START   3
 
#define MMIO_SPTE_GEN_LOW_END   10
 
#define MMIO_SPTE_GEN_HIGH_START   52
 
#define MMIO_SPTE_GEN_HIGH_END   62
 
#define MMIO_SPTE_GEN_LOW_MASK
 
#define MMIO_SPTE_GEN_HIGH_MASK
 
#define SPTE_MMIO_ALLOWED_MASK   (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0))
 
#define MMIO_SPTE_GEN_LOW_BITS   (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
 
#define MMIO_SPTE_GEN_HIGH_BITS   (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
 
#define MMIO_SPTE_GEN_LOW_SHIFT   (MMIO_SPTE_GEN_LOW_START - 0)
 
#define MMIO_SPTE_GEN_HIGH_SHIFT   (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)
 
#define MMIO_SPTE_GEN_MASK   GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
 
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN   5
 
#define REMOVED_SPTE   0x5a0ULL
 

Functions

static bool is_removed_spte (u64 spte)
 
static int spte_index (u64 *sptep)
 
static struct kvm_mmu_page * to_shadow_page (hpa_t shadow_page)
 
static struct kvm_mmu_page * spte_to_child_sp (u64 spte)
 
static struct kvm_mmu_page * sptep_to_sp (u64 *sptep)
 
static struct kvm_mmu_page * root_to_sp (hpa_t root)
 
static bool is_mmio_spte (u64 spte)
 
static bool is_shadow_present_pte (u64 pte)
 
static bool kvm_ad_enabled (void)
 
static bool sp_ad_disabled (struct kvm_mmu_page *sp)
 
static bool spte_ad_enabled (u64 spte)
 
static bool spte_ad_need_write_protect (u64 spte)
 
static u64 spte_shadow_accessed_mask (u64 spte)
 
static u64 spte_shadow_dirty_mask (u64 spte)
 
static bool is_access_track_spte (u64 spte)
 
static bool is_large_pte (u64 pte)
 
static bool is_last_spte (u64 pte, int level)
 
static bool is_executable_pte (u64 spte)
 
static kvm_pfn_t spte_to_pfn (u64 pte)
 
static bool is_accessed_spte (u64 spte)
 
static bool is_dirty_spte (u64 spte)
 
static u64 get_rsvd_bits (struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
 
static bool __is_rsvd_bits_set (struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
 
static bool __is_bad_mt_xwr (struct rsvd_bits_validate *rsvd_check, u64 pte)
 
static __always_inline bool is_rsvd_spte (struct rsvd_bits_validate *rsvd_check, u64 spte, int level)
 
static bool is_writable_pte (unsigned long pte)
 
static void check_spte_writable_invariants (u64 spte)
 
static bool is_mmu_writable_spte (u64 spte)
 
static u64 get_mmio_spte_generation (u64 spte)
 
bool spte_has_volatile_bits (u64 spte)
 
bool make_spte (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte)
 
u64 make_huge_page_split_spte (struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index)
 
u64 make_nonleaf_spte (u64 *child_pt, bool ad_disabled)
 
u64 make_mmio_spte (struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 
u64 mark_spte_for_access_track (u64 spte)
 
static u64 restore_acc_track_spte (u64 spte)
 
u64 kvm_mmu_changed_pte_notifier_make_spte (u64 old_spte, kvm_pfn_t new_pfn)
 
void __init kvm_mmu_spte_module_init (void)
 
void kvm_mmu_reset_all_pte_masks (void)
 

Variables

u64 __read_mostly shadow_host_writable_mask
 
u64 __read_mostly shadow_mmu_writable_mask
 
u64 __read_mostly shadow_nx_mask
 
u64 __read_mostly shadow_x_mask
 
u64 __read_mostly shadow_user_mask
 
u64 __read_mostly shadow_accessed_mask
 
u64 __read_mostly shadow_dirty_mask
 
u64 __read_mostly shadow_mmio_value
 
u64 __read_mostly shadow_mmio_mask
 
u64 __read_mostly shadow_mmio_access_mask
 
u64 __read_mostly shadow_present_mask
 
u64 __read_mostly shadow_memtype_mask
 
u64 __read_mostly shadow_me_value
 
u64 __read_mostly shadow_me_mask
 
u64 __read_mostly shadow_acc_track_mask
 
u64 __read_mostly shadow_nonpresent_or_rsvd_mask
 
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask
 

Macro Definition Documentation

◆ ACC_ALL

#define ACC_ALL   (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

Definition at line 49 of file spte.h.

◆ ACC_EXEC_MASK

#define ACC_EXEC_MASK   1

Definition at line 46 of file spte.h.

◆ ACC_USER_MASK

#define ACC_USER_MASK   PT_USER_MASK

Definition at line 48 of file spte.h.

◆ ACC_WRITE_MASK

#define ACC_WRITE_MASK   PT_WRITABLE_MASK

Definition at line 47 of file spte.h.

◆ DEFAULT_SPTE_HOST_WRITABLE

#define DEFAULT_SPTE_HOST_WRITABLE   BIT_ULL(9)

Definition at line 80 of file spte.h.

◆ DEFAULT_SPTE_MMU_WRITABLE

#define DEFAULT_SPTE_MMU_WRITABLE   BIT_ULL(10)

Definition at line 81 of file spte.h.

◆ EPT_SPTE_HOST_WRITABLE

#define EPT_SPTE_HOST_WRITABLE   BIT_ULL(57)

Definition at line 88 of file spte.h.

◆ EPT_SPTE_MMU_WRITABLE

#define EPT_SPTE_MMU_WRITABLE   BIT_ULL(58)

Definition at line 89 of file spte.h.

◆ MMIO_SPTE_GEN_HIGH_BITS

#define MMIO_SPTE_GEN_HIGH_BITS   (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

Definition at line 142 of file spte.h.

◆ MMIO_SPTE_GEN_HIGH_END

#define MMIO_SPTE_GEN_HIGH_END   62

Definition at line 118 of file spte.h.

◆ MMIO_SPTE_GEN_HIGH_MASK

#define MMIO_SPTE_GEN_HIGH_MASK
Value:
GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
            MMIO_SPTE_GEN_HIGH_START)

Definition at line 122 of file spte.h.

◆ MMIO_SPTE_GEN_HIGH_SHIFT

#define MMIO_SPTE_GEN_HIGH_SHIFT   (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

Definition at line 148 of file spte.h.

◆ MMIO_SPTE_GEN_HIGH_START

#define MMIO_SPTE_GEN_HIGH_START   52

Definition at line 117 of file spte.h.

◆ MMIO_SPTE_GEN_LOW_BITS

#define MMIO_SPTE_GEN_LOW_BITS   (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)

Definition at line 141 of file spte.h.

◆ MMIO_SPTE_GEN_LOW_END

#define MMIO_SPTE_GEN_LOW_END   10

Definition at line 115 of file spte.h.

◆ MMIO_SPTE_GEN_LOW_MASK

#define MMIO_SPTE_GEN_LOW_MASK
Value:
GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
            MMIO_SPTE_GEN_LOW_START)

Definition at line 120 of file spte.h.

◆ MMIO_SPTE_GEN_LOW_SHIFT

#define MMIO_SPTE_GEN_LOW_SHIFT   (MMIO_SPTE_GEN_LOW_START - 0)

Definition at line 147 of file spte.h.

◆ MMIO_SPTE_GEN_LOW_START

#define MMIO_SPTE_GEN_LOW_START   3

Definition at line 114 of file spte.h.

◆ MMIO_SPTE_GEN_MASK

#define MMIO_SPTE_GEN_MASK   GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)

Definition at line 150 of file spte.h.
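
The generation is split across two bit ranges so that it does not collide with bit 11 (SPTE_MMU_PRESENT_MASK) or the legal PA bits 51:12. The stand-alone user-space sketch below illustrates the packing arithmetic; BIT_ULL/GENMASK_ULL and the GEN_* constants are local re-definitions, pack_gen() loosely mirrors generation_mmio_spte_mask() in spte.c, and unpack_gen() mirrors get_mmio_spte_generation() documented below.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)        (1ULL << (n))
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

#define GEN_LOW_START  3
#define GEN_LOW_END    10
#define GEN_HIGH_START 52
#define GEN_HIGH_END   62
#define GEN_LOW_BITS   (GEN_LOW_END - GEN_LOW_START + 1)   /* 8 bits */
#define GEN_HIGH_BITS  (GEN_HIGH_END - GEN_HIGH_START + 1) /* 11 bits */
#define GEN_LOW_MASK   GENMASK_ULL(GEN_LOW_END, GEN_LOW_START)
#define GEN_HIGH_MASK  GENMASK_ULL(GEN_HIGH_END, GEN_HIGH_START)
#define GEN_LOW_SHIFT  (GEN_LOW_START - 0)
#define GEN_HIGH_SHIFT (GEN_HIGH_START - GEN_LOW_BITS)
#define GEN_MASK       GENMASK_ULL(GEN_LOW_BITS + GEN_HIGH_BITS - 1, 0)

/* Scatter generation bits 7:0 into SPTE bits 10:3, bits 18:8 into 62:52. */
static uint64_t pack_gen(uint64_t gen)
{
	return ((gen << GEN_LOW_SHIFT) & GEN_LOW_MASK) |
	       ((gen << GEN_HIGH_SHIFT) & GEN_HIGH_MASK);
}

/* Gather the two fields back into a contiguous 19-bit generation. */
static uint64_t unpack_gen(uint64_t spte)
{
	return ((spte & GEN_LOW_MASK) >> GEN_LOW_SHIFT) |
	       ((spte & GEN_HIGH_MASK) >> GEN_HIGH_SHIFT);
}

int main(void)
{
	uint64_t gen;

	for (gen = 0; gen <= GEN_MASK; gen++)
		assert(unpack_gen(pack_gen(gen)) == gen);
	printf("all %llu generations round-trip\n",
	       (unsigned long long)GEN_MASK + 1);
	return 0;
}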

◆ REMOVED_SPTE

#define REMOVED_SPTE   0x5a0ULL

Definition at line 197 of file spte.h.

◆ SHADOW_ACC_TRACK_SAVED_BITS_MASK

#define SHADOW_ACC_TRACK_SAVED_BITS_MASK
Value:
(SPTE_EPT_READABLE_MASK | \
 SPTE_EPT_EXECUTABLE_MASK)

Definition at line 67 of file spte.h.

◆ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT

#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT   54

Definition at line 69 of file spte.h.

◆ SHADOW_ACC_TRACK_SAVED_MASK

#define SHADOW_ACC_TRACK_SAVED_MASK
Value:
(SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)

Definition at line 70 of file spte.h.
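
When A/D bits are disabled, an access-tracked SPTE must fault on every access; for EPT that means clearing the R and X permission bits, whose original values are parked SHADOW_ACC_TRACK_SAVED_BITS_SHIFT positions higher so the next fault can restore them. A simplified stand-alone sketch of the round trip (local constants; in the kernel, shadow_acc_track_mask also covers the W bit, whose state is handled by the separate writable-bit machinery):

#include <assert.h>
#include <stdint.h>

#define EPT_READABLE_MASK   0x1ULL /* SPTE_EPT_READABLE_MASK */
#define EPT_EXECUTABLE_MASK 0x4ULL /* SPTE_EPT_EXECUTABLE_MASK */
#define SAVED_BITS_MASK     (EPT_READABLE_MASK | EPT_EXECUTABLE_MASK)
#define SAVED_BITS_SHIFT    54     /* SHADOW_ACC_TRACK_SAVED_BITS_SHIFT */
#define SAVED_MASK          (SAVED_BITS_MASK << SAVED_BITS_SHIFT)

/* Park R/X in the saved-bits area, cf. mark_spte_for_access_track(). */
static uint64_t save_rx(uint64_t spte)
{
	spte |= (spte & SAVED_BITS_MASK) << SAVED_BITS_SHIFT;
	spte &= ~SAVED_BITS_MASK; /* hardware now faults on any access */
	return spte;
}

/* Pull R/X back out of the saved-bits area, cf. restore_acc_track_spte(). */
static uint64_t restore_rx(uint64_t spte)
{
	uint64_t saved = (spte >> SAVED_BITS_SHIFT) & SAVED_BITS_MASK;

	spte &= ~SAVED_MASK;
	return spte | saved;
}

int main(void)
{
	/* An R+X (not writable) EPT SPTE pointing at PFN 0x123. */
	uint64_t spte = 0x123000ULL | EPT_READABLE_MASK | EPT_EXECUTABLE_MASK;

	assert(restore_rx(save_rx(spte)) == spte);
	assert((save_rx(spte) & 0x7) == 0); /* no RWX: not-present to hardware */
	return 0;
}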

◆ SHADOW_NONPRESENT_OR_RSVD_MASK_LEN

#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN   5

Definition at line 183 of file spte.h.

◆ SPTE_BASE_ADDR_MASK

#define SPTE_BASE_ADDR_MASK   (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))

Definition at line 40 of file spte.h.

◆ SPTE_ENT_PER_PAGE

#define SPTE_ENT_PER_PAGE   __PT_ENT_PER_PAGE(SPTE_LEVEL_BITS)

Definition at line 58 of file spte.h.

◆ SPTE_EPT_EXECUTABLE_MASK

#define SPTE_EPT_EXECUTABLE_MASK   0x4ull

Definition at line 53 of file spte.h.

◆ SPTE_EPT_READABLE_MASK

#define SPTE_EPT_READABLE_MASK   0x1ull

Definition at line 52 of file spte.h.

◆ SPTE_INDEX

#define SPTE_INDEX (   address,
  level 
)    __PT_INDEX(address, level, SPTE_LEVEL_BITS)

Definition at line 57 of file spte.h.

◆ SPTE_LEVEL_BITS

#define SPTE_LEVEL_BITS   9

Definition at line 55 of file spte.h.

◆ SPTE_LEVEL_SHIFT

#define SPTE_LEVEL_SHIFT (   level)    __PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS)

Definition at line 56 of file spte.h.
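
All three macros reduce to shift-and-mask arithmetic: each level of the page-table walk consumes SPTE_LEVEL_BITS (9) address bits above PAGE_SHIFT, giving 512 entries per table page. A stand-alone sketch, assuming the conventional __PT_LEVEL_SHIFT/__PT_INDEX expansions from mmu_internal.h:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define LEVEL_BITS 9 /* SPTE_LEVEL_BITS; 1 << 9 = 512 = SPTE_ENT_PER_PAGE */

#define LEVEL_SHIFT(level)    (PAGE_SHIFT + ((level) - 1) * LEVEL_BITS)
#define PT_INDEX(addr, level) \
	(((addr) >> LEVEL_SHIFT(level)) & ((1ULL << LEVEL_BITS) - 1))

int main(void)
{
	uint64_t addr = 0x7f1234567000ULL;
	int level;

	/* One entry maps 512GiB at level 4, 1GiB at 3, 2MiB at 2, 4KiB at 1. */
	for (level = 4; level >= 1; level--)
		printf("level %d: shift %2d, index %3llu\n", level,
		       LEVEL_SHIFT(level),
		       (unsigned long long)PT_INDEX(addr, level));
	return 0;
}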

◆ SPTE_MMIO_ALLOWED_MASK

#define SPTE_MMIO_ALLOWED_MASK   (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0))

Definition at line 137 of file spte.h.

◆ SPTE_MMU_PRESENT_MASK

#define SPTE_MMU_PRESENT_MASK   BIT_ULL(11)

Definition at line 16 of file spte.h.

◆ SPTE_PERM_MASK

#define SPTE_PERM_MASK
Value:
(PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask | shadow_x_mask | \
 shadow_nx_mask | shadow_me_mask)

Definition at line 43 of file spte.h.

◆ SPTE_TDP_AD_DISABLED

#define SPTE_TDP_AD_DISABLED   (1ULL << SPTE_TDP_AD_SHIFT)

Definition at line 33 of file spte.h.

◆ SPTE_TDP_AD_ENABLED

#define SPTE_TDP_AD_ENABLED   (0ULL << SPTE_TDP_AD_SHIFT)

Definition at line 32 of file spte.h.

◆ SPTE_TDP_AD_MASK

#define SPTE_TDP_AD_MASK   (3ULL << SPTE_TDP_AD_SHIFT)

Definition at line 31 of file spte.h.

◆ SPTE_TDP_AD_SHIFT

#define SPTE_TDP_AD_SHIFT   52

Definition at line 30 of file spte.h.

◆ SPTE_TDP_AD_WRPROT_ONLY

#define SPTE_TDP_AD_WRPROT_ONLY   (2ULL << SPTE_TDP_AD_SHIFT)

Definition at line 34 of file spte.h.

Function Documentation

◆ __is_bad_mt_xwr()

static bool __is_bad_mt_xwr ( struct rsvd_bits_validate *  rsvd_check,
u64  pte 
)
inline static

Definition at line 362 of file spte.h.

364 {
365  return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
366 }
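
The lookup folds an EPT leaf's permission bits (XWR, bits 2:0) and memory type (bits 5:3) into a 6-bit index, one bad_mt_xwr bit per combination. A stand-alone sketch using a single hypothetical rule (EPT forbids write-only mappings; the real bitmap is built by the reserved-bits code in mmu.c):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bad_mt_xwr = 0;
	uint64_t pte;
	int mt;

	/* Flag XWR == 010b (write-only) as bad for every memory type. */
	for (mt = 0; mt < 8; mt++)
		bad_mt_xwr |= 1ULL << ((mt << 3) | 2);

	/* A write-only PTE with WB memtype (6): low 6 bits are 110010b. */
	pte = (6ULL << 3) | 2;

	printf("pte low bits 0x%02llx -> %s\n",
	       (unsigned long long)(pte & 0x3f),
	       (bad_mt_xwr & (1ULL << (pte & 0x3f))) ? "bad" : "ok");
	return 0;
}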

◆ __is_rsvd_bits_set()

static bool __is_rsvd_bits_set ( struct rsvd_bits_validate *  rsvd_check,
u64  pte,
int  level 
)
inline static

Definition at line 356 of file spte.h.

358 {
359  return pte & get_rsvd_bits(rsvd_check, pte, level);
360 }

◆ check_spte_writable_invariants()

static void check_spte_writable_invariants ( u64  spte)
inline static

Definition at line 447 of file spte.h.

448 {
449  if (spte & shadow_mmu_writable_mask)
450  WARN_ONCE(!(spte & shadow_host_writable_mask),
451  KBUILD_MODNAME ": MMU-writable SPTE is not Host-writable: %llx",
452  spte);
453  else
454  WARN_ONCE(is_writable_pte(spte),
455  KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte);
456 }
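
The invariant is a chain of implications: Writable implies MMU-writable, and MMU-writable implies Host-writable. A minimal stand-alone sketch of the same predicate, assuming the default (non-EPT) bit positions:

#include <stdint.h>
#include <stdio.h>

#define WRITABLE      (1ULL << 1)  /* PT_WRITABLE_MASK */
#define HOST_WRITABLE (1ULL << 9)  /* DEFAULT_SPTE_HOST_WRITABLE */
#define MMU_WRITABLE  (1ULL << 10) /* DEFAULT_SPTE_MMU_WRITABLE */

static int spte_writable_ok(uint64_t spte)
{
	if (spte & MMU_WRITABLE)
		return !!(spte & HOST_WRITABLE); /* MMU-writable => Host-writable */
	return !(spte & WRITABLE);               /* !MMU-writable => !Writable */
}

int main(void)
{
	printf("%d\n", spte_writable_ok(HOST_WRITABLE | MMU_WRITABLE | WRITABLE)); /* 1 */
	printf("%d\n", spte_writable_ok(MMU_WRITABLE)); /* 0: missing Host-writable */
	printf("%d\n", spte_writable_ok(WRITABLE));     /* 0: Writable but not MMU-writable */
	return 0;
}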

◆ get_mmio_spte_generation()

static u64 get_mmio_spte_generation ( u64  spte)
inline static

Definition at line 463 of file spte.h.

464 {
465  u64 gen;
466 
467  gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
468  gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
469  return gen;
470 }

◆ get_rsvd_bits()

static u64 get_rsvd_bits ( struct rsvd_bits_validate *  rsvd_check,
u64  pte,
int  level 
)
inline static

Definition at line 348 of file spte.h.

350 {
351  int bit7 = (pte >> 7) & 1;
352 
353  return rsvd_check->rsvd_bits_mask[bit7][level-1];
354 }

◆ is_access_track_spte()

static bool is_access_track_spte ( u64  spte)
inline static

Definition at line 308 of file spte.h.

309 {
310  return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
311 }

◆ is_accessed_spte()

static bool is_accessed_spte ( u64  spte)
inline static

Definition at line 333 of file spte.h.

334 {
335  u64 accessed_mask = spte_shadow_accessed_mask(spte);
336 
337  return accessed_mask ? spte & accessed_mask
338  : !is_access_track_spte(spte);
339 }

◆ is_dirty_spte()

static bool is_dirty_spte ( u64  spte)
inline static

Definition at line 341 of file spte.h.

342 {
343  u64 dirty_mask = spte_shadow_dirty_mask(spte);
344 
345  return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
346 }

◆ is_executable_pte()

static bool is_executable_pte ( u64  spte)
inline static

Definition at line 323 of file spte.h.

324 {
325  return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
326 }

◆ is_large_pte()

static bool is_large_pte ( u64  pte)
inline static

Definition at line 313 of file spte.h.

314 {
315  return pte & PT_PAGE_SIZE_MASK;
316 }

◆ is_last_spte()

static bool is_last_spte ( u64  pte,
int  level 
)
inline static

Definition at line 318 of file spte.h.

319 {
320  return (level == PG_LEVEL_4K) || is_large_pte(pte);
321 }

◆ is_mmio_spte()

static bool is_mmio_spte ( u64  spte)
inline static

Definition at line 252 of file spte.h.

253 {
254  return (spte & shadow_mmio_mask) == shadow_mmio_value &&
255  likely(enable_mmio_caching);
256 }

◆ is_mmu_writable_spte()

static bool is_mmu_writable_spte ( u64  spte)
inline static

Definition at line 458 of file spte.h.

459 {
460  return spte & shadow_mmu_writable_mask;
461 }

◆ is_removed_spte()

static bool is_removed_spte ( u64  spte)
inline static

Definition at line 202 of file spte.h.

203 {
204  return spte == REMOVED_SPTE;
205 }

◆ is_rsvd_spte()

static __always_inline bool is_rsvd_spte ( struct rsvd_bits_validate *  rsvd_check,
u64  spte,
int  level 
)
static

Definition at line 368 of file spte.h.

370 {
371  return __is_bad_mt_xwr(rsvd_check, spte) ||
372  __is_rsvd_bits_set(rsvd_check, spte, level);
373 }

◆ is_shadow_present_pte()

static bool is_shadow_present_pte ( u64  pte)
inline static

Definition at line 258 of file spte.h.

259 {
260  return !!(pte & SPTE_MMU_PRESENT_MASK);
261 }

◆ is_writable_pte()

static bool is_writable_pte ( unsigned long  pte)
inline static

Definition at line 441 of file spte.h.

442 {
443  return pte & PT_WRITABLE_MASK;
444 }

◆ kvm_ad_enabled()

static bool kvm_ad_enabled ( void  )
inline static

Definition at line 269 of file spte.h.

270 {
271  return !!shadow_accessed_mask;
272 }

◆ kvm_mmu_changed_pte_notifier_make_spte()

u64 kvm_mmu_changed_pte_notifier_make_spte ( u64  old_spte,
kvm_pfn_t  new_pfn 
)

Definition at line 325 of file spte.c.

326 {
327  u64 new_spte;
328 
329  new_spte = old_spte & ~SPTE_BASE_ADDR_MASK;
330  new_spte |= (u64)new_pfn << PAGE_SHIFT;
331 
332  new_spte &= ~PT_WRITABLE_MASK;
333  new_spte &= ~shadow_host_writable_mask;
334  new_spte &= ~shadow_mmu_writable_mask;
335 
336  new_spte = mark_spte_for_access_track(new_spte);
337 
338  return new_spte;
339 }

◆ kvm_mmu_reset_all_pte_masks()

void kvm_mmu_reset_all_pte_masks ( void  )

Definition at line 453 of file spte.c.

454 {
455  u8 low_phys_bits;
456  u64 mask;
457 
458  shadow_phys_bits = kvm_get_shadow_phys_bits();
459 
460  /*
461  * If the CPU has 46 or less physical address bits, then set an
462  * appropriate mask to guard against L1TF attacks. Otherwise, it is
463  * assumed that the CPU is not vulnerable to L1TF.
464  *
465  * Some Intel CPUs address the L1 cache using more PA bits than are
466  * reported by CPUID. Use the PA width of the L1 cache when possible
467  * to achieve more effective mitigation, e.g. if system RAM overlaps
468  * the most significant bits of legal physical address space.
469  */
470  shadow_nonpresent_or_rsvd_mask = 0;
471  low_phys_bits = boot_cpu_data.x86_phys_bits;
472  if (boot_cpu_has_bug(X86_BUG_L1TF) &&
473  !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
474  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
475  low_phys_bits = boot_cpu_data.x86_cache_bits
476  - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
477  shadow_nonpresent_or_rsvd_mask =
478  rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
479  }
480 
481  shadow_nonpresent_or_rsvd_lower_gfn_mask =
482  GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
483 
484  shadow_user_mask = PT_USER_MASK;
485  shadow_accessed_mask = PT_ACCESSED_MASK;
486  shadow_dirty_mask = PT_DIRTY_MASK;
487  shadow_nx_mask = PT64_NX_MASK;
488  shadow_x_mask = 0;
489  shadow_present_mask = PT_PRESENT_MASK;
490 
491  /*
492  * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
493  * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
494  * correct memtype (WB is the "weakest" memtype).
495  */
496  shadow_memtype_mask = 0;
497  shadow_acc_track_mask = 0;
498  shadow_me_mask = 0;
499  shadow_me_value = 0;
500 
501  shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
502  shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;
503 
504  /*
505  * Set a reserved PA bit in MMIO SPTEs to generate page faults with
506  * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
507  * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
508  * 52-bit physical addresses then there are no reserved PA bits in the
509  * PTEs and so the reserved PA approach must be disabled.
510  */
511  if (shadow_phys_bits < 52)
512  mask = BIT_ULL(51) | PT_PRESENT_MASK;
513  else
514  mask = 0;
515 
516  kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
517 }
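
A stand-alone sketch of the L1TF mask arithmetic above, for a hypothetical affected CPU with 46 physical and 46 L1-cache PA bits (GENMASK_ULL is redefined locally; rsvd_bits(s, e) in mmu.h sets bits s through e, i.e. GENMASK_ULL(e, s)):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))
#define PAGE_SHIFT    12
#define RSVD_MASK_LEN 5 /* SHADOW_NONPRESENT_OR_RSVD_MASK_LEN */

int main(void)
{
	unsigned int phys_bits = 46, cache_bits = 46; /* hypothetical CPU */
	unsigned int low_phys_bits = phys_bits;
	uint64_t nonpresent_or_rsvd = 0;

	if (cache_bits < 52 - RSVD_MASK_LEN) {
		low_phys_bits = cache_bits - RSVD_MASK_LEN;
		/* Five reserved bits just below the cache PA width: 45:41. */
		nonpresent_or_rsvd = GENMASK_ULL(cache_bits - 1, low_phys_bits);
	}

	printf("nonpresent_or_rsvd_mask = 0x%016llx\n",
	       (unsigned long long)nonpresent_or_rsvd);
	printf("lower_gfn_mask          = 0x%016llx\n",
	       (unsigned long long)GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT));
	return 0;
}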

◆ kvm_mmu_spte_module_init()

void __init kvm_mmu_spte_module_init ( void  )

Definition at line 48 of file spte.c.

49 {
50  /*
51  * Snapshot userspace's desire to allow MMIO caching. Whether or not
52  * KVM can actually enable MMIO caching depends on vendor-specific
53  * hardware capabilities and other module params that can't be resolved
54  * until the vendor module is loaded, i.e. enable_mmio_caching can and
55  * will change when the vendor module is (re)loaded.
56  */
57  allow_mmio_caching = enable_mmio_caching;
58 }

◆ make_huge_page_split_spte()

u64 make_huge_page_split_spte ( struct kvm *  kvm,
u64  huge_spte,
union kvm_mmu_page_role  role,
int  index 
)

Definition at line 274 of file spte.c.

276 {
277  u64 child_spte;
278 
279  if (WARN_ON_ONCE(!is_shadow_present_pte(huge_spte)))
280  return 0;
281 
282  if (WARN_ON_ONCE(!is_large_pte(huge_spte)))
283  return 0;
284 
285  child_spte = huge_spte;
286 
287  /*
288  * The child_spte already has the base address of the huge page being
289  * split. So we just have to OR in the offset to the page at the next
290  * lower level for the given index.
291  */
292  child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;
293 
294  if (role.level == PG_LEVEL_4K) {
295  child_spte &= ~PT_PAGE_SIZE_MASK;
296 
297  /*
298  * When splitting to a 4K page where execution is allowed, mark
299  * the page executable as the NX hugepage mitigation no longer
300  * applies.
301  */
302  if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
303  child_spte = make_spte_executable(child_spte);
304  }
305 
306  return child_spte;
307 }
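
The offset math is easiest to see with concrete numbers. In this stand-alone sketch, PAGES_PER_HPAGE is a local stand-in for the kernel's KVM_PAGES_PER_HPAGE, and role.level corresponds to the level of the child entries, e.g. splitting a 1GiB SPTE into 512 2MiB children:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
/* 4KiB pages covered by one entry at 'level': 1 -> 1, 2 -> 512, 3 -> 262144 */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
	uint64_t huge_base = 0x40000000ULL; /* 1GiB-aligned base address */
	int child_level = 2;                /* children are 2MiB entries */
	int index;

	for (index = 0; index < 3; index++)
		printf("child %d maps 0x%llx\n", index,
		       (unsigned long long)(huge_base +
		       (((uint64_t)index * PAGES_PER_HPAGE(child_level))
			<< PAGE_SHIFT)));
	return 0; /* prints 0x40000000, 0x40200000, 0x40400000 */
}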

◆ make_mmio_spte()

u64 make_mmio_spte ( struct kvm_vcpu *  vcpu,
u64  gfn,
unsigned int  access 
)

Definition at line 71 of file spte.c.

72 {
73  u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
74  u64 spte = generation_mmio_spte_mask(gen);
75  u64 gpa = gfn << PAGE_SHIFT;
76 
77  WARN_ON_ONCE(!shadow_mmio_value);
78 
79  access &= shadow_mmio_access_mask;
80  spte |= shadow_mmio_value | access;
81  spte |= gpa | shadow_nonpresent_or_rsvd_mask;
82  spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
83  << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
84 
85  return spte;
86 }
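
On L1TF-affected hardware, the GPA bits that fall inside shadow_nonpresent_or_rsvd_mask are forced to 1 and their real values are stashed SHADOW_NONPRESENT_OR_RSVD_MASK_LEN positions higher. A stand-alone sketch of the scramble and its inverse (cf. get_mmio_spte_gfn() in mmu.c), assuming the hypothetical 46-bit-CPU masks computed by kvm_mmu_reset_all_pte_masks():

#include <assert.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))
#define LEN 5 /* SHADOW_NONPRESENT_OR_RSVD_MASK_LEN */

/* Hypothetical 46-bit CPU: reserved bits 45:41, GFN address bits 40:12. */
static const uint64_t rsvd_mask = GENMASK_ULL(45, 41);
static const uint64_t lower_gfn_mask = GENMASK_ULL(40, 12);

static uint64_t scramble(uint64_t gpa) /* cf. make_mmio_spte() */
{
	uint64_t spte = gpa | rsvd_mask;  /* force the reserved bits to 1 */

	spte |= (gpa & rsvd_mask) << LEN; /* stash their original values */
	return spte;
}

static uint64_t unscramble(uint64_t spte) /* cf. get_mmio_spte_gfn() */
{
	uint64_t gpa = spte & lower_gfn_mask;

	gpa |= (spte >> LEN) & rsvd_mask; /* pull the stashed bits back down */
	return gpa;
}

int main(void)
{
	uint64_t gpa = 0x3deadbeef000ULL; /* page-aligned, fits in 46 bits */

	assert(unscramble(scramble(gpa)) == gpa);
	return 0;
}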

◆ make_nonleaf_spte()

u64 make_nonleaf_spte ( u64 *  child_pt,
bool  ad_disabled 
)

Definition at line 310 of file spte.c.

311 {
312  u64 spte = SPTE_MMU_PRESENT_MASK;
313 
314  spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
315  shadow_user_mask | shadow_x_mask | shadow_me_value;
316 
317  if (ad_disabled)
318  spte |= SPTE_TDP_AD_DISABLED;
319  else
320  spte |= shadow_accessed_mask;
321 
322  return spte;
323 }

◆ make_spte()

bool make_spte ( struct kvm_vcpu *  vcpu,
struct kvm_mmu_page sp,
const struct kvm_memory_slot *  slot,
unsigned int  pte_access,
gfn_t  gfn,
kvm_pfn_t  pfn,
u64  old_spte,
bool  prefetch,
bool  can_unsync,
bool  host_writable,
u64 *  new_spte 
)

Definition at line 137 of file spte.c.

142 {
143  int level = sp->role.level;
144  u64 spte = SPTE_MMU_PRESENT_MASK;
145  bool wrprot = false;
146 
147  WARN_ON_ONCE(!pte_access && !shadow_present_mask);
148 
149  if (sp->role.ad_disabled)
150  spte |= SPTE_TDP_AD_DISABLED;
151  else if (kvm_mmu_page_ad_need_write_protect(sp))
152  spte |= SPTE_TDP_AD_WRPROT_ONLY;
153 
154  /*
155  * For the EPT case, shadow_present_mask is 0 if hardware
156  * supports exec-only page table entries. In that case,
157  * ACC_USER_MASK and shadow_user_mask are used to represent
158  * read access. See FNAME(gpte_access) in paging_tmpl.h.
159  */
160  spte |= shadow_present_mask;
161  if (!prefetch)
162  spte |= spte_shadow_accessed_mask(spte);
163 
164  /*
165  * For simplicity, enforce the NX huge page mitigation even if not
166  * strictly necessary. KVM could ignore the mitigation if paging is
167  * disabled in the guest, as the guest doesn't have any page tables to
168  * abuse. But to safely ignore the mitigation, KVM would have to
169  * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
170  * is toggled on, and that's a net negative for performance when TDP is
171  * enabled. When TDP is disabled, KVM will always switch to a new MMU
172  * when CR0.PG is toggled, but leveraging that to ignore the mitigation
173  * would tie make_spte() further to vCPU/MMU state, and add complexity
174  * just to optimize a mode that is anything but performance critical.
175  */
176  if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
177  is_nx_huge_page_enabled(vcpu->kvm)) {
178  pte_access &= ~ACC_EXEC_MASK;
179  }
180 
181  if (pte_access & ACC_EXEC_MASK)
182  spte |= shadow_x_mask;
183  else
184  spte |= shadow_nx_mask;
185 
186  if (pte_access & ACC_USER_MASK)
187  spte |= shadow_user_mask;
188 
189  if (level > PG_LEVEL_4K)
190  spte |= PT_PAGE_SIZE_MASK;
191 
192  if (shadow_memtype_mask)
193  spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
194  kvm_is_mmio_pfn(pfn));
195  if (host_writable)
196  spte |= shadow_host_writable_mask;
197  else
198  pte_access &= ~ACC_WRITE_MASK;
199 
200  if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
201  spte |= shadow_me_value;
202 
203  spte |= (u64)pfn << PAGE_SHIFT;
204 
205  if (pte_access & ACC_WRITE_MASK) {
206  spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
207 
208  /*
209  * Optimization: for pte sync, if spte was writable the hash
210  * lookup is unnecessary (and expensive). Write protection
211  * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
212  * Same reasoning can be applied to dirty page accounting.
213  */
214  if (is_writable_pte(old_spte))
215  goto out;
216 
217  /*
218  * Unsync shadow pages that are reachable by the new, writable
219  * SPTE. Write-protect the SPTE if the page can't be unsync'd,
220  * e.g. it's write-tracked (upper-level SPs) or has one or more
221  * shadow pages and unsync'ing pages is not allowed.
222  */
223  if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
224  wrprot = true;
225  pte_access &= ~ACC_WRITE_MASK;
226  spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
227  }
228  }
229 
230  if (pte_access & ACC_WRITE_MASK)
231  spte |= spte_shadow_dirty_mask(spte);
232 
233 out:
234  if (prefetch)
235  spte = mark_spte_for_access_track(spte);
236 
237  WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
238  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
239  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
240 
241  if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
242  /* Enforced by kvm_mmu_hugepage_adjust. */
243  WARN_ON_ONCE(level > PG_LEVEL_4K);
244  mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
245  }
246 
247  *new_spte = spte;
248  return wrprot;
249 }

◆ mark_spte_for_access_track()

u64 mark_spte_for_access_track ( u64  spte)

Definition at line 341 of file spte.c.

342 {
343  if (spte_ad_enabled(spte))
344  return spte & ~shadow_accessed_mask;
345 
346  if (is_access_track_spte(spte))
347  return spte;
348 
349  check_spte_writable_invariants(spte);
350 
351  WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
352  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
353  "Access Tracking saved bit locations are not zero\n");
354 
355  spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
356  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
357  spte &= ~shadow_acc_track_mask;
358 
359  return spte;
360 }

◆ restore_acc_track_spte()

static u64 restore_acc_track_spte ( u64  spte)
inline static

Definition at line 486 of file spte.h.

487 {
488  u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
489  & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
490 
491  spte &= ~shadow_acc_track_mask;
492  spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
493  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
494  spte |= saved_bits;
495 
496  return spte;
497 }

◆ root_to_sp()

static struct kvm_mmu_page* root_to_sp ( hpa_t  root)
inline static

Definition at line 240 of file spte.h.

241 {
242  if (kvm_mmu_is_dummy_root(root))
243  return NULL;
244 
245  /*
246  * The "root" may be a special root, e.g. a PAE entry, treat it as a
247  * SPTE to ensure any non-PA bits are dropped.
248  */
249  return spte_to_child_sp(root);
250 }

◆ sp_ad_disabled()

static bool sp_ad_disabled ( struct kvm_mmu_page sp)
inline static

Definition at line 274 of file spte.h.

275 {
276  return sp->role.ad_disabled;
277 }

◆ spte_ad_enabled()

static bool spte_ad_enabled ( u64  spte)
inline static

Definition at line 279 of file spte.h.

280 {
281  KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
282  return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED;
283 }

◆ spte_ad_need_write_protect()

static bool spte_ad_need_write_protect ( u64  spte)
inline static

Definition at line 285 of file spte.h.

286 {
287  KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
288  /*
289  * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0',
290  * and non-TDP SPTEs will never set these bits. Optimize for 64-bit
291  * TDP and do the A/D type check unconditionally.
292  */
293  return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED;
294 }

◆ spte_has_volatile_bits()

bool spte_has_volatile_bits ( u64  spte)

Definition at line 114 of file spte.c.

115 {
116  /*
117  * Always atomically update spte if it can be updated
118  * out of mmu-lock, it can ensure dirty bit is not lost,
119  * also, it can help us to get a stable is_writable_pte()
120  * to ensure tlb flush is not missed.
121  */
122  if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
123  return true;
124 
125  if (is_access_track_spte(spte))
126  return true;
127 
128  if (spte_ad_enabled(spte)) {
129  if (!(spte & shadow_accessed_mask) ||
130  (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
131  return true;
132  }
133 
134  return false;
135 }

◆ spte_index()

static int spte_index ( u64 *  sptep)
inline static

Definition at line 208 of file spte.h.

209 {
210  return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
211 }
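
Because page-table pages are 4KiB-aligned and each SPTE is 8 bytes, a pointer's byte offset within its page directly yields the entry index; no pointer back to the containing page is needed. A stand-alone sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENT_PER_PAGE 512 /* SPTE_ENT_PER_PAGE with 9 level bits */

static int index_of(uint64_t *sptep) /* cf. spte_index() */
{
	return ((uintptr_t)sptep / sizeof(*sptep)) & (ENT_PER_PAGE - 1);
}

int main(void)
{
	/* A 4KiB-aligned page of 512 entries, like a shadow page-table page. */
	uint64_t *table = aligned_alloc(4096, 4096);

	if (!table)
		return 1;
	printf("%d %d %d\n", index_of(&table[0]), index_of(&table[7]),
	       index_of(&table[511])); /* prints: 0 7 511 */
	free(table);
	return 0;
}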

◆ spte_shadow_accessed_mask()

static u64 spte_shadow_accessed_mask ( u64  spte)
inline static

Definition at line 296 of file spte.h.

297 {
298  KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
299  return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
300 }

◆ spte_shadow_dirty_mask()

static u64 spte_shadow_dirty_mask ( u64  spte)
inline static

Definition at line 302 of file spte.h.

303 {
304  KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
305  return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
306 }

◆ spte_to_child_sp()

static struct kvm_mmu_page* spte_to_child_sp ( u64  spte)
inline static

Definition at line 230 of file spte.h.

231 {
232  return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);
233 }

◆ spte_to_pfn()

static kvm_pfn_t spte_to_pfn ( u64  pte)
inline static

Definition at line 328 of file spte.h.

329 {
330  return (pte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT;
331 }

◆ sptep_to_sp()

static struct kvm_mmu_page* sptep_to_sp ( u64 *  sptep)
inline static

Definition at line 235 of file spte.h.

236 {
237  return to_shadow_page(__pa(sptep));
238 }

◆ to_shadow_page()

static struct kvm_mmu_page* to_shadow_page ( hpa_t  shadow_page)
inline static

Definition at line 223 of file spte.h.

224 {
225  struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT);
226 
227  return (struct kvm_mmu_page *)page_private(page);
228 }

Variable Documentation

◆ shadow_acc_track_mask

u64 __read_mostly shadow_acc_track_mask
extern

Definition at line 41 of file spte.c.

◆ shadow_accessed_mask

u64 __read_mostly shadow_accessed_mask
extern

Definition at line 32 of file spte.c.

◆ shadow_dirty_mask

u64 __read_mostly shadow_dirty_mask
extern

Definition at line 33 of file spte.c.

◆ shadow_host_writable_mask

u64 __read_mostly shadow_host_writable_mask
extern

Definition at line 27 of file spte.c.

◆ shadow_me_mask

u64 __read_mostly shadow_me_mask
extern

Definition at line 40 of file spte.c.

◆ shadow_me_value

u64 __read_mostly shadow_me_value
extern

Definition at line 39 of file spte.c.

◆ shadow_memtype_mask

u64 __read_mostly shadow_memtype_mask
extern

Definition at line 38 of file spte.c.

◆ shadow_mmio_access_mask

u64 __read_mostly shadow_mmio_access_mask
extern

Definition at line 36 of file spte.c.

◆ shadow_mmio_mask

u64 __read_mostly shadow_mmio_mask
extern

Definition at line 35 of file spte.c.

◆ shadow_mmio_value

u64 __read_mostly shadow_mmio_value
extern

Definition at line 34 of file spte.c.

◆ shadow_mmu_writable_mask

u64 __read_mostly shadow_mmu_writable_mask
extern

Definition at line 28 of file spte.c.

◆ shadow_nonpresent_or_rsvd_lower_gfn_mask

u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask
extern

Definition at line 44 of file spte.c.

◆ shadow_nonpresent_or_rsvd_mask

u64 __read_mostly shadow_nonpresent_or_rsvd_mask
extern

Definition at line 43 of file spte.c.

◆ shadow_nx_mask

u64 __read_mostly shadow_nx_mask
extern

Definition at line 29 of file spte.c.

◆ shadow_present_mask

u64 __read_mostly shadow_present_mask
extern

Definition at line 37 of file spte.c.

◆ shadow_user_mask

u64 __read_mostly shadow_user_mask
extern

Definition at line 31 of file spte.c.

◆ shadow_x_mask

u64 __read_mostly shadow_x_mask
extern

Definition at line 30 of file spte.c.