#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
lockdep_assert_held_read(&kvm->mmu_lock);
lockdep_assert_held_write(&kvm->mmu_lock);
WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
free_page((unsigned long)sp->spt);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_del_rcu(&root->link);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
lockdep_assert_held(&kvm->mmu_lock);
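/*
 * Context: the INIT_LIST_HEAD()/spin_lock_init() calls above are the body of
 * kvm_mmu_init_tdp_mmu(), and the WARN_ON()s are the teardown checks in
 * kvm_mmu_uninit_tdp_mmu(). A minimal sketch of the init side, using only the
 * fields that appear in this file:
 */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
}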
next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				  &prev_root->link,
				  typeof(*prev_root), link);
next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				   typeof(*next_root), link);
if ((!only_valid || !next_root->role.invalid) &&
    kvm_tdp_mmu_get_root(next_root))
next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				  &next_root->link, typeof(*next_root), link);
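/*
 * tdp_mmu_next_root() uses the list_*_or_null_rcu() lookups above to walk the
 * RCU-protected tdp_mmu_roots list: it skips invalid roots when only_valid is
 * set, takes a reference on the root it returns via kvm_tdp_mmu_get_root(),
 * and drops the reference that was held on prev_root.
 */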
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);	\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))	\
		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
	     _root = tdp_mmu_next_root(_kvm, _root, false))

#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
		    kvm_mmu_page_as_id(_root) != _as_id) {		\
		} else
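/*
 * Example (sketch): the yield-safe variants are used with mmu_lock held and
 * may drop the lock between roots. kvm_tdp_mmu_zap_leafs() (declared near the
 * end of this file) uses them roughly like this:
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root_yield_safe(kvm, root)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);

	return flush;
}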
sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
gfn_t gfn, union kvm_mmu_page_role role)
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
trace_kvm_mmu_get_page(sp, true);
union kvm_mmu_page_role role;
role = parent_sp->role;
union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held_write(&kvm->mmu_lock);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
return __pa(root->spt);
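/*
 * The two kvm_mmu_memory_cache_alloc() calls above make up tdp_mmu_alloc_sp();
 * a sketch, assuming the caller has already topped up the vCPU's MMU caches:
 */
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}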
u64 old_spte, u64 new_spte, int level,
kvm_account_pgtable_pages((void *)sp->spt, +1);
atomic64_inc(&kvm->arch.tdp_mmu_pages);
kvm_account_pgtable_pages((void *)sp->spt, -1);
atomic64_dec(&kvm->arch.tdp_mmu_pages);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
int level = sp->role.level;
gfn_t base_gfn = sp->gfn;
trace_kvm_mmu_prepare_zap_page(sp);
gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
u64 old_spte, u64 new_spte, int level,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
WARN_ON_ONCE(level < PG_LEVEL_4K);
WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
if (was_leaf && is_leaf && pfn_changed) {
pr_err("Invalid SPTE change: cannot replace a present leaf\n"
       "SPTE with another present leaf SPTE mapping a\n"
       "different PFN!\n"
       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
       as_id, gfn, old_spte, new_spte, level);
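/*
 * The check above enforces a core TDP MMU invariant: a present leaf SPTE is
 * never changed in place to point at a different PFN. Such a change has to go
 * through a non-present (or REMOVED) intermediate state so the old mapping can
 * be torn down and TLBs flushed; in the full source the pr_err() is followed
 * by a BUG().
 */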
if (old_spte == new_spte)
trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
if (!was_present && !is_present) {
pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
       "should not be replaced with another,\n"
       "different nonpresent SPTE, unless one or both\n"
       "are MMIO SPTEs, or the new SPTE is\n"
       "a temporary removed SPTE.\n"
       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
       as_id, gfn, old_spte, new_spte, level);
if (is_leaf != was_leaf)
if (was_present && !was_leaf &&
    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
u64 *sptep = rcu_dereference(iter->sptep);
lockdep_assert_held_read(&kvm->mmu_lock);
if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
new_spte, iter->level, true);
u64 old_spte, u64 new_spte, gfn_t gfn, int level)
lockdep_assert_held_write(&kvm->mmu_lock);

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root, _start, _end)
#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)	\
		if (!is_shadow_present_pte(_iter.old_spte) ||	\
		    !is_last_spte(_iter.old_spte, _iter.level))	\
			continue;				\
		else
#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
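/*
 * Example (sketch): tdp_mmu_for_each_pte() walks the paging structure rooted
 * at the vCPU's current root. kvm_tdp_mmu_get_walk() (declared at the end of
 * this file) uses it roughly like this to record the SPTEs along a walk:
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->root_role.level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}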
bool flush, bool shared)
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
cond_resched_rwlock_read(&kvm->mmu_lock);
cond_resched_rwlock_write(&kvm->mmu_lock);
bool shared, int zap_level)
if (iter.level > zap_level)
if (WARN_ON_ONCE(!sp->ptep))
gfn_t start, gfn_t end, bool can_yield, bool flush)
lockdep_assert_held_write(&kvm->mmu_lock);
lockdep_assert_held_write(&kvm->mmu_lock);
lockdep_assert_held_write(&kvm->mmu_lock);
read_lock(&kvm->mmu_lock);
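/*
 * tdp_mmu_iter_cond_resched() (fragments above) yields when a reschedule is
 * pending or mmu_lock is contended: it drops RCU, performs any pending TLB
 * flush, calls cond_resched_rwlock_read() or _write() depending on how
 * mmu_lock is held, and then restarts the iterator at the current gfn.
 */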
KVM_BUG_ON(!root->role.invalid, kvm);
read_unlock(&kvm->mmu_lock);
if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
    refcount_read(&kvm->users_count) && kvm->created_vcpus)
	lockdep_assert_held_write(&kvm->mmu_lock);
list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
if (!root->role.invalid) {
root->role.invalid = true;
if (unlikely(!fault->slot))
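/*
 * Root invalidation is split in two: kvm_tdp_mmu_invalidate_all_roots() only
 * marks each live root invalid (root->role.invalid = true) and schedules it
 * for zapping, while kvm_tdp_mmu_zap_invalidated_roots() later tears the
 * invalid roots down under mmu_lock held for read, so vCPUs can keep creating
 * new roots concurrently.
 */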
vcpu->stat.pf_mmio_spte_created++;
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
trace_kvm_mmu_set_spte(iter->level, iter->gfn,
		       rcu_dereference(iter->sptep));
struct kvm_mmu *mmu = vcpu->arch.mmu;
struct kvm *kvm = vcpu->kvm;
trace_kvm_mmu_spte_requested(fault);
goto map_target_level;
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
range->may_block, flush);
struct kvm_gfn_range *range);
struct kvm_gfn_range *range,
ret |= handler(kvm, &iter, range);
struct kvm_gfn_range *range)
trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
struct kvm_gfn_range *range)
struct kvm_gfn_range *range)
WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
if (iter->level != PG_LEVEL_4K ||
if (!pte_write(range->arg.pte)) {
pte_pfn(range->arg.pte));
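/*
 * Sketch of the MMU-notifier plumbing hinted at by the tdp_handler_t typedef
 * and the handler call above, loosely modeled on kvm_tdp_mmu_handle_gfn()
 * (declared at the end of this file):
 */
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	/* Only walk roots belonging to the slot's address space. */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		rcu_read_lock();

		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);

		rcu_read_unlock();
	}

	return ret;
}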
bool spte_set = false;
BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
const struct kvm_memory_slot *slot, int min_level)
bool spte_set = false;
lockdep_assert_held_read(&kvm->mmu_lock);
slot->base_gfn + slot->npages, min_level);
sp->spt = (void *)__get_free_page(gfp);
read_unlock(&kvm->mmu_lock);
write_unlock(&kvm->mmu_lock);
read_lock(&kvm->mmu_lock);
write_lock(&kvm->mmu_lock);
const u64 huge_spte = iter->old_spte;
const int level = iter->level;
trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
gfn_t start, gfn_t end,
int target_level, bool shared)
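/*
 * Huge page splitting: tdp_mmu_alloc_sp_for_split() first tries an atomic
 * (non-sleeping) allocation; if that fails it drops mmu_lock (read or write,
 * depending on 'shared'), allocates with a sleeping GFP, and re-takes the
 * lock, which is what the unlock/lock pairs above correspond to.
 * tdp_mmu_split_huge_page() then replaces the huge SPTE with a fully
 * populated lower-level page table.
 */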
trace_kvm_mmu_split_huge_page(iter.gfn,
const struct kvm_memory_slot *slot,
gfn_t start, gfn_t end,
int target_level, bool shared)
gfn_t start, gfn_t end)
bool spte_set = false;
const struct kvm_memory_slot *slot)
bool spte_set = false;
lockdep_assert_held_read(&kvm->mmu_lock);
slot->base_gfn + slot->npages);
gfn_t gfn, unsigned long mask, bool wrprot)
lockdep_assert_held_write(&kvm->mmu_lock);
gfn + BITS_PER_LONG) {
if (iter.level > PG_LEVEL_4K ||
    !(mask & (1UL << (iter.gfn - gfn))))
mask &= ~(1UL << (iter.gfn - gfn));
trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
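/*
 * clear_dirty_pt_masked() walks the leaf SPTEs covering
 * [gfn + __ffs(mask), gfn + BITS_PER_LONG) and, for each 4K mapping whose bit
 * is set in @mask, clears the dirty-tracking bit (the D-bit, or the writable
 * bit when @wrprot is true), clearing bits from @mask as it goes so the walk
 * can stop early once the mask is empty.
 */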
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
const struct kvm_memory_slot *slot)
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
int max_mapping_level;
if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
if (iter.gfn < start || iter.gfn >= end)
iter.gfn, PG_LEVEL_NUM);
if (max_mapping_level < iter.level)
const struct kvm_memory_slot *slot)
lockdep_assert_held_read(&kvm->mmu_lock);
gfn_t gfn, int min_level)
bool spte_set = false;
BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
struct kvm_memory_slot *slot, gfn_t gfn,
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
struct kvm_mmu *mmu = vcpu->arch.mmu;
gfn_t gfn = addr >> PAGE_SHIFT;
*root_level = vcpu->arch.mmu->root_role.level;
struct kvm_mmu *mmu = vcpu->arch.mmu;
gfn_t gfn = addr >> PAGE_SHIFT;
return rcu_dereference(sptep);
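/*
 * kvm_tdp_mmu_fast_pf_get_last_sptep() returns the RCU-protected pointer
 * above, pointing at the last-level SPTE reached for @addr; callers are
 * expected to be inside the TDP MMU's lockless walk
 * (kvm_tdp_mmu_walk_lockless_begin/end, i.e. under RCU) so that the returned
 * sptep remains valid.
 */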
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
void kvm_flush_remote_tlbs(struct kvm *kvm)
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
struct kmem_cache * mmu_page_header_cache
int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level)
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
static gfn_t kvm_mmu_max_gfn(void)
static void kvm_update_page_stats(struct kvm *kvm, int level, int count)
static bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
static int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
#define KVM_MMU_WARN_ON(x)
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
static void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
u64 __read_mostly shadow_accessed_mask
u64 __read_mostly shadow_dirty_mask
u64 mark_spte_for_access_track(u64 spte)
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte)
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index)
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
u64 __read_mostly shadow_mmu_writable_mask
static bool is_dirty_spte(u64 spte)
static bool is_accessed_spte(u64 spte)
static bool is_mmio_spte(u64 spte)
static void check_spte_writable_invariants(u64 spte)
static bool is_writable_pte(unsigned long pte)
static struct kvm_mmu_page * sptep_to_sp(u64 *sptep)
#define SPTE_ENT_PER_PAGE
static bool spte_ad_need_write_protect(u64 spte)
static bool is_last_spte(u64 pte, int level)
static bool is_shadow_present_pte(u64 pte)
static kvm_pfn_t spte_to_pfn(u64 pte)
static bool spte_ad_enabled(u64 spte)
static bool kvm_ad_enabled(void)
static bool is_removed_spte(u64 spte)
static bool is_large_pte(u64 pte)
refcount_t tdp_mmu_root_count
bool nx_huge_page_disallowed
bool tdp_mmu_scheduled_root_to_zap
struct list_head possible_nx_huge_page_link
union kvm_mmu_page_role role
bool huge_page_disallowed
const bool nx_huge_page_workaround_enabled
struct kvm_memory_slot * slot
gfn_t next_last_level_gfn
tdp_ptep_t spte_to_child_pt(u64 spte, int level)
static u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte, u64 mask, int level)
static u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
static u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte, u64 new_spte, int level)
static void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
static u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end)
static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, bool can_yield, bool flush)
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, u64 old_spte, u64 new_spte, gfn_t gfn, int level)
bool(* tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot)
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level)
static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, struct tdp_iter *iter)
static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level, bool shared)
static struct kvm_mmu_page * tdp_mmu_next_root(struct kvm *kvm, struct kvm_mmu_page *prev_root, bool only_valid)
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *sp, bool shared)
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, const struct kvm_memory_slot *slot)
static int tdp_mmu_set_spte_atomic(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte)
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level)
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role)
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, bool shared)
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level, bool shared)
static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared)
#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end)
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, int min_level)
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static struct kvm_mmu_page * tdp_mmu_alloc_sp_for_split(struct kvm *kvm, struct tdp_iter *iter, bool shared)
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct tdp_iter *iter)
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush)
static struct kvm_mmu_page * tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
u64 * kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte)
static int tdp_mmu_zap_spte_atomic(struct kvm *kvm, struct tdp_iter *iter)
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)
static bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter, bool flush, bool shared)
static gfn_t tdp_mmu_max_gfn_exclusive(void)
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, const struct kvm_memory_slot *slot)
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *sp, bool shared)
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot)
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, int min_level)
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, struct kvm_gfn_range *range, tdp_handler_t handler)
static struct kvm_mmu_page * __tdp_mmu_alloc_sp_for_split(gfp_t gfp)
static void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte)
#define tdp_root_for_each_pte(_iter, _root, _start, _end)
static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared, int zap_level)
static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, int target_level, bool shared)
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
static bool is_tdp_mmu_page(struct kvm_mmu_page *sp)
static __must_check bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)