#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif
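/*
 * Note on the two definitions above: KVM_MMU_WARN_ON() is a full
 * WARN_ON_ONCE() only when CONFIG_KVM_PROVE_MMU is enabled; otherwise
 * BUILD_BUG_ON_INVALID() compiles the condition away while still
 * syntax/type-checking it, so the checks cost nothing in production builds.
 */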
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level) \
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level)  (1 << (bits_per_level))
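/*
 * Worked example: with 9 bits per level, as in 64-bit paging,
 * __PT_LEVEL_SHIFT(2, 9) = PAGE_SHIFT + 9 = 21, so
 * __PT_INDEX(addr, 2, 9) = (addr >> 21) & 511 selects address bits 29:21 as
 * the level-2 table index, and __PT_ENT_PER_PAGE(9) = 512 entries per table.
 */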
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
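/*
 * PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT bit and thus are
 * guaranteed to be non-zero when valid, which is why '0' rather than
 * INVALID_PAGE is used to mark an invalid PAE root.
 */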
static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}
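/*
 * The "dummy" root is backed by the shared zero page, so it can be installed
 * as a root HPA without allocating a shadow page; kvm_mmu_is_dummy_root()
 * simply checks whether a root points at the zero page.
 */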
typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;
		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/* The following two entries key the shadow page in the hash table. */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/* The guest translation (GPA + KVM-format access bits) shadowed by each SPTE. */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/* Avoids reading torn SPTE values while an update is in progress. */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;
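/*
 * Allocation note: struct kvm_mmu_page "headers" come from
 * mmu_page_header_cache, while the page table page each one describes (spt)
 * is allocated separately from the per-vCPU MMU memory caches.
 */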
static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log come from L2 rather than L1, so KVM must rely on write
	 * protection (which bypasses PML) to track dirty pages for L2.
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
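/*
 * For example, KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is 512, so
 * gfn_round_for_level(gfn, PG_LEVEL_2M) clears the low 9 bits of the gfn,
 * i.e. rounds it down to a 2MiB-aligned boundary.
 */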
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}
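/*
 * nx_huge_pages is the module-wide knob for the iTLB multihit mitigation; a
 * VM can additionally opt out via kvm->arch.disable_nx_huge_pages, set
 * through the KVM_CAP_VM_DISABLE_NX_HUGE_PAGES capability.
 */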
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch,
					int *emulation_type)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/* Async #PF "faults" (prefetches) were already counted on the original fault. */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	/* Prefetch faults aren't truly spurious; only count them when fixed. */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;
	return r;
}
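/*
 * The PFERR_* masks used in the fault decode above correspond to the
 * architectural x86 #PF error code bits: P (bit 0), W/R (bit 1), U/S (bit 2),
 * RSVD (bit 3) and I/D, i.e. instruction fetch (bit 4).
 */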
int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */