#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/e820/api.h>
#include <asm/memtype.h>
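/*
 * From make_mmio_spte(): an emulated MMIO access is cached in a non-present
 * SPTE, which encodes the guest physical address, the allowed access bits and
 * the current memslot generation so later faults can skip the memslot lookup.
 */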
	u64 gpa = gfn << PAGE_SHIFT;
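/*
 * kvm_is_mmio_pfn(): decide whether a host pfn must be treated as MMIO.
 * Valid, non-reserved pfns are regular RAM; anything the e820 map does not
 * report as RAM is MMIO.
 */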
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
		       /* Reserved-but-cacheable pages (e.g. NVDIMM DAX) are not MMIO. */
		       (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
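/*
 * make_spte(): build a leaf SPTE for the given gfn/pfn and access rights.
 * Returns true if the gfn had to be write-protected, e.g. because an unsync
 * attempt via mmu_try_to_unsync_pages() failed.
 */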
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WRPROT_ONLY;
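	/*
	 * Huge SPTEs set the page-size bit; the vendor callback
	 * kvm_x86_get_mt_mask() supplies the memory-type bits, keyed on
	 * whether the pfn is MMIO (see kvm_is_mmio_pfn() above).
	 */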
	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;

	if (shadow_memtype_mask)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
							 kvm_is_mmio_pfn(pfn));

	spte |= (u64)pfn << PAGE_SHIFT;
	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
	/* Writable SPTEs in a dirty-logged slot must be 4K; kvm_mmu_hugepage_adjust() enforces this. */
	WARN_ON_ONCE(level > PG_LEVEL_4K);
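/*
 * make_huge_page_split_spte(): derive the SPTE for one child page when a huge
 * SPTE is split to the next lower level; the child inherits the huge SPTE's
 * bits and only the base address (and, at 4K, the page-size bit) changes.
 */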
	child_spte = huge_spte;

	/* OR in the offset of this child page within the huge page. */
	child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

	if (role.level == PG_LEVEL_4K) {
		child_spte &= ~PT_PAGE_SIZE_MASK;
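/*
 * kvm_mmu_changed_pte_notifier_make_spte(): retarget an existing SPTE at a
 * new pfn when the backing page is migrated; the writable bits are cleared so
 * the next write faults and goes through make_spte() again.
 */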
	new_spte |= (u64)new_pfn << PAGE_SHIFT;
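/*
 * mark_spte_for_access_track(): for SPTEs without hardware A/D bits, stash
 * the R/W/X bits in the saved-bits field and clear them so the next access
 * faults and can be counted; the WARN below guards the saved-bits area.
 */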
353 "Access Tracking saved bit locations are not zero\n");
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;
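/*
 * kvm_mmu_set_ept_masks(): with EPT, a cached MMIO SPTE is a deliberate
 * misconfiguration (write+execute without read), so EPT misconfig VM-exits
 * identify MMIO accesses without relying on reserved physical-address bits.
 */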
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
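/*
 * kvm_mmu_reset_all_pte_masks(): on CPUs affected by L1TF, non-present (MMIO)
 * SPTEs set address bits above what the L1 data cache can hold, and GFN bits
 * that would collide with that region are stored shifted up, so a speculative
 * L1TF load through such an SPTE can never hit cached data.
 */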
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
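/*
 * Worked example (illustrative numbers, assuming the usual
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN of 5): with x86_cache_bits == 44 and the
 * L1TF bug present, low_phys_bits = 44 - 5 = 39, so
 * shadow_nonpresent_or_rsvd_mask covers bits 43:39 and
 * shadow_nonpresent_or_rsvd_lower_gfn_mask covers bits 38:12.  GPA bits 38:12
 * stay in place in an MMIO SPTE, GPA bits 43:39 are stored shifted up by
 * SHADOW_NONPRESENT_OR_RSVD_MASK_LEN, and the 43:39 field itself is forced to
 * all ones.
 */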
struct vmcs_config vmcs_config __ro_after_init
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch)
static __always_inline u64 rsvd_bits(int s, int e)
static u8 kvm_get_shadow_phys_bits(void)
#define PT_PAGE_SIZE_MASK
static bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
static bool is_nx_huge_page_enabled(struct kvm *kvm)
u64 __read_mostly shadow_accessed_mask
bool spte_has_volatile_bits(u64 spte)
u64 __read_mostly shadow_me_value
u64 __read_mostly shadow_acc_track_mask
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
u64 __read_mostly shadow_host_writable_mask
EXPORT_SYMBOL_GPL(enable_mmio_caching)
u64 __read_mostly shadow_nonpresent_or_rsvd_mask
static bool __ro_after_init allow_mmio_caching
u8 __read_mostly shadow_phys_bits
u64 __read_mostly shadow_dirty_mask
u64 __read_mostly shadow_mmio_access_mask
u64 __read_mostly shadow_mmio_mask
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
u64 __read_mostly shadow_memtype_mask
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
u64 __read_mostly shadow_me_mask
u64 __read_mostly shadow_user_mask
u64 mark_spte_for_access_track(u64 spte)
static u64 generation_mmio_spte_mask(u64 gen)
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
void __init kvm_mmu_spte_module_init(void)
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte)
static u64 make_spte_executable(u64 spte)
u64 __read_mostly shadow_nx_mask
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
bool __read_mostly enable_mmio_caching
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask
void kvm_mmu_reset_all_pte_masks(void)
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index)
u64 __read_mostly shadow_x_mask
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444)
u64 __read_mostly shadow_mmu_writable_mask
u64 __read_mostly shadow_present_mask
u64 __read_mostly shadow_mmio_value
#define EPT_SPTE_MMU_WRITABLE
#define SPTE_MMU_PRESENT_MASK
#define SPTE_MMIO_ALLOWED_MASK
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT
#define SPTE_TDP_AD_WRPROT_ONLY
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
#define EPT_SPTE_HOST_WRITABLE
#define SPTE_TDP_AD_DISABLED
static void check_spte_writable_invariants(u64 spte)
static bool is_access_track_spte(u64 spte)
static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level)
#define MMIO_SPTE_GEN_HIGH_SHIFT
#define MMIO_SPTE_GEN_HIGH_MASK
static bool is_writable_pte(unsigned long pte)
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK
static bool is_shadow_present_pte(u64 pte)
#define DEFAULT_SPTE_HOST_WRITABLE
static bool spte_ad_enabled(u64 spte)
static u64 spte_shadow_accessed_mask(u64 spte)
#define SPTE_BASE_ADDR_MASK
#define DEFAULT_SPTE_MMU_WRITABLE
#define MMIO_SPTE_GEN_LOW_SHIFT
static u64 restore_acc_track_spte(u64 spte)
#define MMIO_SPTE_GEN_LOW_MASK
static u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
static u64 spte_shadow_dirty_mask(u64 spte)
#define MMIO_SPTE_GEN_MASK
static bool is_large_pte(u64 pte)
static bool is_mmu_writable_spte(u64 spte)
union kvm_mmu_page_role role