17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/kvm_host.h>
33 #include <linux/types.h>
34 #include <linux/string.h>
36 #include <linux/highmem.h>
37 #include <linux/moduleparam.h>
38 #include <linux/export.h>
39 #include <linux/swap.h>
40 #include <linux/hugetlb.h>
41 #include <linux/compiler.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/sched/signal.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/kern_levels.h>
48 #include <linux/kstrtox.h>
49 #include <linux/kthread.h>
52 #include <asm/memtype.h>
53 #include <asm/cmpxchg.h>
55 #include <asm/set_memory.h>
66 #ifdef CONFIG_PREEMPT_RT
84 .get = param_get_uint,
119 #define PTE_PREFETCH_NUM 8
121 #include <trace/events/kvm.h>
124 #define PTE_LIST_EXT 14
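/*
 * Iterator macros below wrap shadow_walk_init()/shadow_walk_okay()/
 * shadow_walk_next() to descend the shadow page tables from the root SPTE
 * down to the leaf for a given address; the _lockless variant also snapshots
 * each SPTE via mmu_spte_get_lockless() for use outside mmu_lock.
 */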
163 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
164 for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
166 shadow_walk_okay(&(_walker)); \
167 shadow_walk_next(&(_walker)))
169 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
170 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
171 shadow_walk_okay(&(_walker)); \
172 shadow_walk_next(&(_walker)))
174 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
175 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
176 shadow_walk_okay(&(_walker)) && \
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 __shadow_walk_next(&(_walker), spte))
192 #define CREATE_TRACE_POINTS
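/*
 * The BUILD_MMU_ROLE_*_ACCESSOR macros below generate ____is_<reg>_<name>()
 * and is_<reg>_<name>() helpers that test a single CR0/CR4/EFER bit, either
 * from a raw kvm_mmu_role_regs snapshot or from the cached mmu->cpu_role.
 */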
200 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
201 static inline bool __maybe_unused \
202 ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs) \
204 return !!(regs->reg & flag); \
223 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
224 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
226 return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
239 return mmu->cpu_role.base.level > 0;
244 return !mmu->cpu_role.base.has_4_byte_gpte;
252 .efer = vcpu->arch.efer,
266 if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
269 return mmu->get_guest_pgd(vcpu);
274 #if IS_ENABLED(CONFIG_HYPERV)
275 return kvm_x86_ops.flush_remote_tlbs_range;
297 trace_mark_mmio_spte(sptep, gfn, spte);
308 return gpa >> PAGE_SHIFT;
318 u64 kvm_gen, spte_gen, gen;
320 gen = kvm_vcpu_memslots(vcpu)->generation;
321 if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
327 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
328 return likely(kvm_gen == spte_gen);
339 WRITE_ONCE(*sptep, spte);
344 WRITE_ONCE(*sptep, spte);
349 return xchg(sptep, spte);
354 return READ_ONCE(*sptep);
374 sp->clear_spte_count++;
456 count = sp->clear_spte_count;
466 count != sp->clear_spte_count))
491 u64 old_spte = *sptep;
564 u64 old_spte = *sptep;
588 WARN_ON_ONCE(page && !page_count(page));
624 (unsigned long *)sptep);
660 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
674 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
684 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
688 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
689 PT64_ROOT_MAX_LEVEL);
692 if (maybe_indirect) {
693 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
694 PT64_ROOT_MAX_LEVEL);
698 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
699 PT64_ROOT_MAX_LEVEL);
704 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
705 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
706 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
707 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
719 if (sp->role.passthrough)
722 if (!sp->role.direct)
751 return sp->role.access;
755 gfn_t gfn, unsigned int access)
763 "access mismatch under %s page %llx (expected %u, got %u)\n",
764 sp->
role.passthrough ?
"passthrough" :
"direct",
768 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
769 sp->
role.passthrough ?
"passthrough" :
"direct",
786 const struct kvm_memory_slot *slot, int level)
791 return &slot->arch.lpage_info[level - 2][idx];
800 #define KVM_LPAGE_MIXED_FLAG BIT(31)
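/*
 * disallow_lpage is a per-gfn count in kvm_lpage_info; a non-zero value
 * forbids mapping the gfn with a huge page at that level. The helper below
 * adjusts the count for every huge page level covering the gfn.
 */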
803 gfn_t gfn, int count)
805 struct kvm_lpage_info *linfo;
808 for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
811 old = linfo->disallow_lpage;
812 linfo->disallow_lpage += count;
829 struct kvm_memslots *slots;
830 struct kvm_memory_slot *slot;
833 kvm->arch.indirect_shadow_pages++;
835 slots = kvm_memslots_for_spte_role(kvm, sp->role);
836 slot = __gfn_to_memslot(slots, gfn);
839 if (sp->role.level > PG_LEVEL_4K)
861 ++kvm->stat.nx_lpage_splits;
863 &kvm->arch.possible_nx_huge_pages);
867 bool nx_huge_page_possible)
871 if (nx_huge_page_possible)
877 struct kvm_memslots *slots;
878 struct kvm_memory_slot *slot;
881 kvm->arch.indirect_shadow_pages--;
883 slots = kvm_memslots_for_spte_role(kvm, sp->role);
884 slot = __gfn_to_memslot(slots, gfn);
885 if (sp->role.level > PG_LEVEL_4K)
896 --kvm->stat.nx_lpage_splits;
911 struct kvm_memory_slot *slot;
914 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
916 if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
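/*
 * rmap_head->val is an encoded pointer: either a single spte pointer, or,
 * with bit 0 set, a pointer to a pte_list_desc holding multiple sptes.
 * pte_list_add() below grows that representation as sptes are added.
 */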
934 struct kvm_rmap_head *rmap_head)
939 if (!rmap_head->val) {
940 rmap_head->val = (unsigned long)spte;
941 } else if (!(rmap_head->val & 1)) {
942 desc = kvm_mmu_memory_cache_alloc(cache);
943 desc->sptes[0] = (u64 *)rmap_head->val;
944 desc->sptes[1] = spte;
947 rmap_head->val = (unsigned long)desc | 1;
958 desc = kvm_mmu_memory_cache_alloc(cache);
962 rmap_head->val = (unsigned long)desc | 1;
970 struct kvm_rmap_head *rmap_head,
981 KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
989 head_desc->sptes[j] = NULL;
999 if (!head_desc->more)
1002 rmap_head->val = (unsigned long)head_desc->more | 1;
1007 struct kvm_rmap_head *rmap_head)
1012 if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
1015 if (!(rmap_head->val & 1)) {
1016 if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
1024 if (desc->sptes[i] == spte) {
1033 KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
1038 struct kvm_rmap_head *rmap_head, u64 *sptep)
1046 struct kvm_rmap_head *rmap_head)
1051 if (!rmap_head->val)
1054 if (!(rmap_head->val & 1)) {
1061 for (; desc; desc = next) {
1077 if (!rmap_head->val)
1079 else if (!(rmap_head->val & 1))
1087 const struct kvm_memory_slot *slot)
1092 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1097 struct kvm_memslots *slots;
1098 struct kvm_memory_slot *slot;
1101 struct kvm_rmap_head *rmap_head;
1111 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1113 slot = __gfn_to_memslot(slots, gfn);
1141 if (!rmap_head->val)
1144 if (!(rmap_head->val & 1)) {
1146 sptep = (u64 *)rmap_head->val;
1191 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
1192 for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
1193 _spte_; _spte_ = rmap_get_next(_iter_))
1208 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1269 (unsigned long *)sptep);
1273 return was_writable;
1283 const struct kvm_memory_slot *slot)
1308 struct kvm_memory_slot *slot,
1309 gfn_t gfn_offset, unsigned long mask)
1311 struct kvm_rmap_head *rmap_head;
1315 slot->base_gfn + gfn_offset, mask, true);
1321 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1341 struct kvm_memory_slot *slot,
1342 gfn_t gfn_offset, unsigned long mask)
1344 struct kvm_rmap_head *rmap_head;
1348 slot->base_gfn + gfn_offset, mask, false);
1354 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1374 struct kvm_memory_slot *slot,
1375 gfn_t gfn_offset, unsigned long mask)
1386 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1387 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1388 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1396 if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1397 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1403 if (kvm_x86_ops.cpu_dirty_log_size)
1411 return kvm_x86_ops.cpu_dirty_log_size;
1415 struct kvm_memory_slot *slot, u64 gfn,
1418 struct kvm_rmap_head *rmap_head;
1420 bool write_protected = false;
1423 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1433 return write_protected;
1438 struct kvm_memory_slot *slot;
1445 const struct kvm_memory_slot *slot)
1451 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1458 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1463 bool need_flush = false;
1467 WARN_ON_ONCE(pte_huge(pte));
1468 new_pfn = pte_pfn(pte);
1474 if (pte_write(pte)) {
1496 const struct kvm_memory_slot *slot;
1514 iterator->level = level;
1521 const struct kvm_memory_slot *slot,
1522 int start_level, int end_level,
1523 gfn_t start_gfn, gfn_t end_gfn)
1525 iterator->slot = slot;
1536 return !!iterator->rmap;
1542 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1544 if (iterator->rmap->val)
1549 iterator->rmap = NULL;
1556 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
1557 _start_gfn, _end_gfn, _iter_) \
1558 for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
1559 _end_level_, _start_gfn, _end_gfn); \
1560 slot_rmap_walk_okay(_iter_); \
1561 slot_rmap_walk_next(_iter_))
1564 struct kvm_memory_slot *slot, gfn_t gfn,
1565 int level, pte_t pte);
1568 struct kvm_gfn_range *range,
1575 range->start, range->end - 1, &iterator)
1576 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1577 iterator.level, range->arg.pte);
1592 if (kvm_x86_ops.set_apic_access_page_addr &&
1593 range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
1627 struct kvm_memory_slot *slot, gfn_t gfn,
1628 int level, pte_t unused)
1639 #define RMAP_RECYCLE_THRESHOLD 1000
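/*
 * __rmap_add() below links a new spte into the gfn's rmap; if the rmap grows
 * past RMAP_RECYCLE_THRESHOLD, the existing sptes are zapped and TLBs are
 * flushed so the list stays bounded.
 */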
1642 struct kvm_mmu_memory_cache *cache,
1643 const struct kvm_memory_slot *slot,
1644 u64 *spte, gfn_t gfn, unsigned int access)
1647 struct kvm_rmap_head *rmap_head;
1657 if (rmap_count > kvm->stat.max_mmu_rmap_size)
1658 kvm->stat.max_mmu_rmap_size = rmap_count;
1665 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
1666 u64 *spte, gfn_t gfn, unsigned int access)
1668 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1670 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1701 #ifdef CONFIG_KVM_PROVE_MMU
1706 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1721 kvm->arch.n_used_mmu_pages += nr;
1728 kvm_account_pgtable_pages((void *)sp->spt, +1);
1734 kvm_account_pgtable_pages((void *)sp->spt, -1);
1742 list_del(&sp->link);
1743 free_page((unsigned long)sp->spt);
1744 if (!sp->role.direct)
1751 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1792 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
1799 #define KVM_PAGE_ARRAY_NR 16
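/*
 * kvm_mmu_pages is a small fixed-size array (KVM_PAGE_ARRAY_NR entries) used
 * to collect unsync child shadow pages found while walking
 * unsync_child_bitmap, so they can be synced or zapped in a batch.
 */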
1815 for (i=0; i < pvec->nr; i++)
1816 if (pvec->page[i].sp == sp)
1829 __clear_bit(idx, sp->unsync_child_bitmap);
1835 int i, ret, nr_unsync_leaf = 0;
1837 for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1839 u64 ent = sp->spt[i];
1856 } else if (ret > 0) {
1857 nr_unsync_leaf += ret;
1860 } else if (child->unsync) {
1868 return nr_unsync_leaf;
1871 #define INVALID_INDEX (-1)
1886 WARN_ON_ONCE(!sp->unsync);
1887 trace_kvm_mmu_sync_page(sp);
1889 --kvm->stat.mmu_unsync;
1893 struct list_head *invalid_list);
1895 struct list_head *invalid_list);
1899 if (sp->role.direct)
1902 if (sp->role.passthrough)
1908 #define for_each_valid_sp(_kvm, _sp, _list) \
1909 hlist_for_each_entry(_sp, _list, hash_link) \
1910 if (is_obsolete_sp((_kvm), (_sp))) { \
1913 #define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
1914 for_each_valid_sp(_kvm, _sp, \
1915 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1916 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
1920 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1931 const union kvm_mmu_page_role sync_role_ign = {
1944 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1945 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
1956 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1988 struct list_head *invalid_list)
1998 struct list_head *invalid_list,
2001 if (!remote_flush && list_empty(invalid_list))
2004 if (!list_empty(invalid_list))
2013 if (sp->role.invalid)
2023 unsigned int idx[PT64_ROOT_MAX_LEVEL];
2026 #define for_each_sp(pvec, sp, parents, i) \
2027 for (i = mmu_pages_first(&pvec, &parents); \
2028 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
2029 i = mmu_pages_next(&pvec, &parents, i))
2037 for (n = i+1; n < pvec->nr; n++) {
2039 unsigned idx = pvec->page[n].idx;
2040 int level = sp->role.level;
2042 parents->idx[level-1] = idx;
2043 if (level == PG_LEVEL_4K)
2046 parents->parent[level-2] = sp;
2064 level = sp->role.level;
2065 WARN_ON_ONCE(level == PG_LEVEL_4K);
2067 parents->parent[level-2] = sp;
2072 parents->parent[level-1] = NULL;
2079 unsigned int level = 0;
2082 unsigned int idx = parents->idx[level];
2083 sp = parents->parent[level];
2104 bool protected = false;
2119 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2122 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2126 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2152 struct kvm_vcpu *vcpu,
2154 struct hlist_head *sp_list,
2155 union kvm_mmu_page_role role)
2185 if (sp->role.direct)
2189 if (KVM_BUG_ON(!vcpu, kvm))
2208 WARN_ON_ONCE(!list_empty(&invalid_list));
2219 ++kvm->stat.mmu_cache_miss;
2224 if (collisions > kvm->stat.max_mmu_page_hash_collisions)
2225 kvm->stat.max_mmu_page_hash_collisions = collisions;
2239 struct hlist_head *sp_list,
2240 union kvm_mmu_page_role role)
2249 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
2259 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2264 hlist_add_head(&sp->hash_link, sp_list);
2273 struct kvm_vcpu *vcpu,
2276 union kvm_mmu_page_role role)
2278 struct hlist_head *sp_list;
2280 bool created = false;
2290 trace_kvm_mmu_get_page(sp, created);
2296 union kvm_mmu_page_role role)
2300 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2301 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2308 unsigned int access)
2311 union kvm_mmu_page_role role;
2313 role = parent_sp->role;
2315 role.access = access;
2316 role.direct = direct;
2317 role.passthrough = 0;
2345 if (role.has_4_byte_gpte) {
2346 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2354 u64 *sptep, gfn_t gfn,
2355 bool direct, unsigned int access)
2357 union kvm_mmu_page_role role;
2360 return ERR_PTR(-EEXIST);
2367 struct kvm_vcpu *vcpu, hpa_t root,
2370 iterator->addr = addr;
2372 iterator->level = vcpu->arch.mmu->root_role.level;
2376 !vcpu->arch.mmu->root_role.direct)
2384 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2387 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2391 iterator->level = 0;
2396 struct kvm_vcpu *vcpu, u64 addr)
2404 if (iterator->level < PG_LEVEL_4K)
2416 iterator->level = 0;
2430 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2471 unsigned direct_access)
2484 if (child->role.access == direct_access)
2494 u64 *spte, struct list_head *invalid_list)
2525 struct list_head *invalid_list)
2547 struct list_head *invalid_list)
2553 if (parent->role.level == PG_LEVEL_4K)
2571 struct list_head *invalid_list,
2574 bool list_unstable, zapped_root = false;
2576 lockdep_assert_held_write(&kvm->mmu_lock);
2577 trace_kvm_mmu_prepare_zap_page(sp);
2578 ++kvm->stat.mmu_shadow_zapped;
2584 list_unstable = *nr_zapped;
2600 if (sp->role.invalid)
2601 list_add(&sp->link, invalid_list);
2603 list_move(&sp->link, invalid_list);
2610 list_del(&sp->link);
2623 sp->role.invalid = 1;
2631 return list_unstable;
2635 struct list_head *invalid_list)
2644 struct list_head *invalid_list)
2648 if (list_empty(invalid_list))
2662 list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2669 unsigned long nr_to_zap)
2671 unsigned long total_zapped = 0;
2677 if (list_empty(&kvm->arch.active_mmu_pages))
2681 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2691 total_zapped += nr_zapped;
2692 if (total_zapped >= nr_to_zap)
2701 kvm->stat.mmu_recycled += total_zapped;
2702 return total_zapped;
2707 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2708 return kvm->arch.n_max_mmu_pages -
2709 kvm->arch.n_used_mmu_pages;
2718 if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2743 write_lock(&kvm->mmu_lock);
2745 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2749 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2752 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2754 write_unlock(&kvm->mmu_lock);
2764 write_lock(&kvm->mmu_lock);
2770 write_unlock(&kvm->mmu_lock);
2780 if (vcpu->arch.mmu->root_role.direct)
2792 trace_kvm_mmu_unsync_page(sp);
2793 ++kvm->stat.mmu_unsync;
2806 gfn_t gfn, bool can_unsync, bool prefetch)
2809 bool locked = false;
2844 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2854 if (READ_ONCE(sp->unsync))
2858 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2862 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
2906 static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
2907 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2911 int level = sp->role.level;
2912 int was_rmapped = 0;
2920 bool prefetch = !fault || fault->prefetch;
2921 bool write_fault = fault && fault->write;
2923 if (unlikely(is_noslot_pfn(pfn))) {
2924 vcpu->stat.pf_mmio_spte_created++;
2948 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2949 true, host_writable, &spte);
2951 if (*sptep == spte) {
2955 trace_kvm_mmu_set_spte(level, gfn, sptep);
2979 u64 *start, u64 *end)
2982 struct kvm_memory_slot *slot;
2983 unsigned int access = sp->role.access;
2996 for (i = 0; i < ret; i++, gfn++, start++) {
2998 page_to_pfn(pages[i]), NULL);
3008 u64 *spte, *start = NULL;
3011 WARN_ON_ONCE(!sp->role.direct);
3044 if (sp->role.level > PG_LEVEL_4K)
3051 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
3083 const struct kvm_memory_slot *slot)
3085 int level = PG_LEVEL_4K;
3087 unsigned long flags;
3101 hva = __gfn_to_hva_memslot(slot, gfn);
3108 local_irq_save(flags);
3117 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3121 p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3122 if (p4d_none(p4d) || !p4d_present(p4d))
3125 pud = READ_ONCE(*pud_offset(&p4d, hva));
3126 if (pud_none(pud) || !pud_present(pud))
3129 if (pud_leaf(pud)) {
3130 level = PG_LEVEL_1G;
3134 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3135 if (pmd_none(pmd) || !pmd_present(pmd))
3139 level = PG_LEVEL_2M;
3142 local_irq_restore(flags);
3147 const struct kvm_memory_slot *slot,
3148 gfn_t gfn, int max_level, bool is_private)
3150 struct kvm_lpage_info *linfo;
3153 max_level = min(max_level, max_huge_page_level);
3154 for ( ; max_level > PG_LEVEL_4K; max_level--) {
3156 if (!linfo->disallow_lpage)
3163 if (max_level == PG_LEVEL_4K)
3167 return min(host_level, max_level);
3171 const struct kvm_memory_slot *slot, gfn_t gfn,
3174 bool is_private = kvm_slot_can_be_private(slot) &&
3175 kvm_mem_is_private(kvm, gfn);
3182 struct kvm_memory_slot *slot = fault->slot;
3187 if (unlikely(fault->max_level == PG_LEVEL_4K))
3190 if (is_error_noslot_pfn(fault->pfn))
3193 if (kvm_slot_dirty_track_enabled(slot))
3211 mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
3212 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3213 fault->pfn &= ~mask;
3218 if (cur_level > PG_LEVEL_4K &&
3230 u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
3231 KVM_PAGES_PER_HPAGE(cur_level - 1);
3232 fault->pfn |= fault->gfn & page_mask;
3242 gfn_t base_gfn = fault->gfn;
3246 trace_kvm_mmu_spte_requested(fault);
3260 if (sp == ERR_PTR(-EEXIST))
3273 base_gfn, fault->pfn, fault);
3285 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
3290 if (is_sigpending_pfn(fault->pfn)) {
3291 kvm_handle_signal_exit(vcpu);
3300 if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
3303 if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
3313 unsigned int access)
3374 return fault->write;
3383 u64 *sptep, u64 old_spte, u64 new_spte)
3397 if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3450 uint retry_count = 0;
3470 if (WARN_ON_ONCE(!sptep))
3530 if (sp->role.level > PG_LEVEL_4K &&
3531 kvm_slot_dirty_track_enabled(fault->slot))
3536 if (new_spte == spte ||
3550 if (++retry_count > 4) {
3551 pr_warn_once("Fast #PF retrying more than 4 times.\n");
3557 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3561 vcpu->stat.pf_fast++;
3567 struct list_head *invalid_list)
3571 if (!VALID_PAGE(*root_hpa))
3575 if (WARN_ON_ONCE(!sp))
3583 *root_hpa = INVALID_PAGE;
3588 ulong roots_to_free)
3592 bool free_active_root;
3594 WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
3596 BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3599 free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
3600 && VALID_PAGE(mmu->root.hpa);
3602 if (!free_active_root) {
3603 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3604 if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3605 VALID_PAGE(mmu->prev_roots[i].hpa))
3608 if (i == KVM_MMU_NUM_PREV_ROOTS)
3612 write_lock(&kvm->mmu_lock);
3614 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3615 if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3619 if (free_active_root) {
3624 } else if (mmu->pae_root) {
3625 for (i = 0; i < 4; ++i) {
3634 mmu->root.hpa = INVALID_PAGE;
3639 write_unlock(&kvm->mmu_lock);
3645 unsigned long roots_to_free = 0;
3654 WARN_ON_ONCE(mmu->root_role.guest_mode);
3656 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3657 root_hpa = mmu->prev_roots[i].hpa;
3658 if (!VALID_PAGE(root_hpa))
3662 if (!sp || sp->role.guest_mode)
3663 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3673 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3677 role.quadrant = quadrant;
3679 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3680 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3685 return __pa(sp->spt);
3690 struct kvm_mmu *mmu = vcpu->arch.mmu;
3691 u8 shadow_root_level = mmu->root_role.level;
3696 write_lock(&vcpu->kvm->mmu_lock);
3703 mmu->root.hpa = root;
3706 mmu->root.hpa = root;
3708 if (WARN_ON_ONCE(!mmu->pae_root)) {
3713 for (i = 0; i < 4; ++i) {
3721 mmu->root.hpa = __pa(mmu->pae_root);
3723 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3731 write_unlock(&vcpu->kvm->mmu_lock);
3737 struct kvm_memslots *slots;
3738 struct kvm_memory_slot *slot;
3748 mutex_lock(&kvm->slots_arch_lock);
3762 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
3763 slots = __kvm_memslots(kvm, i);
3764 kvm_for_each_memslot(slot, bkt, slots) {
3789 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3792 mutex_unlock(&kvm->slots_arch_lock);
3798 struct kvm_mmu *mmu = vcpu->arch.mmu;
3799 u64 pdptrs[4], pm_mask;
3800 gfn_t root_gfn, root_pgd;
3817 for (i = 0; i < 4; ++i) {
3818 pdptrs[i] = mmu->get_pdptr(vcpu, i);
3831 write_lock(&vcpu->kvm->mmu_lock);
3842 mmu->root_role.level);
3843 mmu->root.hpa = root;
3847 if (WARN_ON_ONCE(!mmu->pae_root)) {
3861 if (WARN_ON_ONCE(!mmu->pml4_root)) {
3865 mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3868 if (WARN_ON_ONCE(!mmu->pml5_root)) {
3872 mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3876 for (i = 0; i < 4; ++i) {
3884 root_gfn = pdptrs[i] >> PAGE_SHIFT;
3896 mmu->pae_root[i] = root | pm_mask;
3900 mmu->root.hpa = __pa(mmu->pml5_root);
3902 mmu->root.hpa = __pa(mmu->pml4_root);
3904 mmu->root.hpa = __pa(mmu->pae_root);
3907 mmu->root.pgd = root_pgd;
3909 write_unlock(&vcpu->kvm->mmu_lock);
3916 struct kvm_mmu *mmu = vcpu->arch.mmu;
3918 u64 *pml5_root = NULL;
3919 u64 *pml4_root = NULL;
3928 if (mmu->root_role.direct ||
3940 if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3947 if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3948 (need_pml5 && mmu->pml5_root)))
3955 pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3959 #ifdef CONFIG_X86_64
3960 pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3965 pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3971 mmu->pae_root = pae_root;
3972 mmu->pml4_root = pml4_root;
3973 mmu->pml5_root = pml5_root;
3977 #ifdef CONFIG_X86_64
3979 free_page((unsigned long)pml4_root);
3981 free_page((unsigned long)pae_root);
4012 if (WARN_ON_ONCE(!sp))
4026 if (vcpu->arch.mmu->root_role.direct)
4029 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4035 hpa_t root = vcpu->arch.mmu->root.hpa;
4042 write_lock(&vcpu->kvm->mmu_lock);
4044 write_unlock(&vcpu->kvm->mmu_lock);
4048 write_lock(&vcpu->kvm->mmu_lock);
4050 for (i = 0; i < 4; ++i) {
4051 hpa_t root = vcpu->arch.mmu->pae_root[i];
4059 write_unlock(&vcpu->kvm->mmu_lock);
4064 unsigned long roots_to_free = 0;
4067 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4069 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
4076 gpa_t vaddr, u64 access,
4105 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
4112 *root_level = iterator.level;
4115 leaf = iterator.level;
4127 u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
4128 struct rsvd_bits_validate *rsvd_check;
4129 int root, leaf, level;
4130 bool reserved = false;
4137 leaf = get_walk(vcpu, addr, sptes, &root);
4141 if (unlikely(leaf < 0)) {
4146 *sptep = sptes[leaf];
4157 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4159 for (level = root; level >= leaf; level--)
4160 reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
4163 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4165 for (level = root; level >= leaf; level--)
4166 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4167 sptes[level], level,
4183 if (WARN_ON_ONCE(reserved))
4196 trace_handle_mmio_page_fault(addr, gfn, access);
4211 if (unlikely(fault->rsvd))
4241 u32 id = vcpu->arch.apf.id;
4244 vcpu->arch.apf.id = 1;
4246 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4252 struct kvm_arch_async_pf arch;
4256 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4267 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4275 if (!vcpu->arch.mmu->root_role.direct &&
4284 BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
4287 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
4288 order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
4290 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
4293 if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4302 kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
4312 if (!kvm_slot_can_be_private(fault->slot)) {
4333 struct kvm_memory_slot *slot = fault->slot;
4341 if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
4344 if (!kvm_is_visible_memslot(slot)) {
4348 fault->pfn = KVM_PFN_NOSLOT;
4358 if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
4363 if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
4379 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4381 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4382 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
4401 unsigned int access)
4405 fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
4430 mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
4437 if (unlikely(is_error_pfn(fault->pfn)))
4440 if (unlikely(!fault->slot))
4450 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
4479 if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
4487 return fault->slot &&
4488 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4515 write_lock(&vcpu->kvm->mmu_lock);
4527 write_unlock(&vcpu->kvm->mmu_lock);
4541 u64 fault_address, char *insn, int insn_len)
4544 u32 flags = vcpu->arch.apf.host_apf_flags;
4546 #ifndef CONFIG_X86_64
4548 if (WARN_ON_ONCE(fault_address >> 32))
4552 vcpu->arch.l1tf_flush_l1d = true;
4554 trace_kvm_page_fault(vcpu, fault_address, error_code);
4560 } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4561 vcpu->arch.apf.host_apf_flags = 0;
4562 local_irq_disable();
4563 kvm_async_pf_task_wait_schedule(fault_address);
4566 WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4573 #ifdef CONFIG_X86_64
4574 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
4595 read_lock(&vcpu->kvm->mmu_lock);
4603 read_unlock(&vcpu->kvm->mmu_lock);
4632 int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
4641 #ifdef CONFIG_X86_64
4643 return kvm_tdp_mmu_page_fault(vcpu, fault);
4653 context->sync_spte = NULL;
4657 union kvm_mmu_page_role role)
4661 if (!VALID_PAGE(root->hpa))
4664 if (!role.direct && pgd != root->pgd)
4668 if (WARN_ON_ONCE(!sp))
4684 union kvm_mmu_page_role new_role)
4691 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4700 swap(mmu->root, mmu->prev_roots[i]);
4718 union kvm_mmu_page_role new_role)
4722 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
4729 swap(mmu->root, mmu->prev_roots[i]);
4731 for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
4732 mmu->prev_roots[i] = mmu->prev_roots[i + 1];
4733 mmu->prev_roots[i].hpa = INVALID_PAGE;
4738 gpa_t new_pgd, union kvm_mmu_page_role new_role)
4744 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4747 if (VALID_PAGE(mmu->root.hpa))
4755 struct kvm_mmu *mmu = vcpu->arch.mmu;
4756 union kvm_mmu_page_role new_role = mmu->root_role;
4771 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4774 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4775 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4790 if (!new_role.direct) {
4793 if (!WARN_ON_ONCE(!sp))
4800 unsigned int access)
4815 #define PTTYPE_EPT 18
4816 #define PTTYPE PTTYPE_EPT
4829 u64 pa_bits_rsvd, int level, bool nx,
4830 bool gbpages, bool pse, bool amd)
4832 u64 gbpages_bit_rsvd = 0;
4833 u64 nonleaf_bit8_rsvd = 0;
4836 rsvd_check->bad_mt_xwr = 0;
4842 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4844 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4860 rsvd_check->rsvd_bits_mask[0][1] = 0;
4861 rsvd_check->rsvd_bits_mask[0][0] = 0;
4862 rsvd_check->rsvd_bits_mask[1][0] =
4863 rsvd_check->rsvd_bits_mask[0][0];
4866 rsvd_check->rsvd_bits_mask[1][1] = 0;
4872 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4875 rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4878 rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4882 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4883 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4884 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4886 rsvd_check->rsvd_bits_mask[1][0] =
4887 rsvd_check->rsvd_bits_mask[0][0];
4890 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4893 rsvd_check->rsvd_bits_mask[1][4] =
4894 rsvd_check->rsvd_bits_mask[0][4];
4897 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4900 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4902 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4903 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4904 rsvd_check->rsvd_bits_mask[1][3] =
4905 rsvd_check->rsvd_bits_mask[0][3];
4906 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4909 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4911 rsvd_check->rsvd_bits_mask[1][0] =
4912 rsvd_check->rsvd_bits_mask[0][0];
4918 struct kvm_mmu *context)
4921 vcpu->arch.reserved_gpa_bits,
4922 context->cpu_role.base.level, is_efer_nx(context),
4924 is_cr4_pse(context),
4929 u64 pa_bits_rsvd, bool execonly,
4930 int huge_page_level)
4932 u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4933 u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
4936 if (huge_page_level < PG_LEVEL_1G)
4938 if (huge_page_level < PG_LEVEL_2M)
4941 rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4942 rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4943 rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
4944 rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
4945 rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4948 rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4949 rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4950 rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
4951 rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
4952 rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4954 bad_mt_xwr = 0xFFull << (2 * 8);
4955 bad_mt_xwr |= 0xFFull << (3 * 8);
4956 bad_mt_xwr |= 0xFFull << (7 * 8);
4957 bad_mt_xwr |= REPEAT_BYTE(1ull << 2);
4958 bad_mt_xwr |= REPEAT_BYTE(1ull << 6);
4961 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4963 rsvd_check->bad_mt_xwr = bad_mt_xwr;
4967 struct kvm_mmu *context, bool execonly, int huge_page_level)
4970 vcpu->arch.reserved_gpa_bits, execonly,
4985 struct kvm_mmu *context)
4991 struct rsvd_bits_validate *shadow_zero_check;
4996 shadow_zero_check = &context->shadow_zero_check;
4998 context->root_role.level,
4999 context->root_role.efer_nx,
5006 for (i = context->root_role.level; --i >= 0;) {
5033 struct rsvd_bits_validate *shadow_zero_check;
5036 shadow_zero_check = &context->shadow_zero_check;
5040 context->root_role.level, true,
5041 boot_cpu_has(X86_FEATURE_GBPAGES),
5046 max_huge_page_level);
5051 for (i = context->root_role.level; --i >= 0;) {
5066 max_huge_page_level);
5069 #define BYTE_MASK(access) \
5070 ((1 & (access) ? 2 : 0) | \
5071 (2 & (access) ? 4 : 0) | \
5072 (3 & (access) ? 8 : 0) | \
5073 (4 & (access) ? 16 : 0) | \
5074 (5 & (access) ? 32 : 0) | \
5075 (6 & (access) ? 64 : 0) | \
5076 (7 & (access) ? 128 : 0))
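/*
 * BYTE_MASK(access) builds an 8-bit mask with one bit per combination of
 * exec/write/user pte permissions, set when that combination includes the
 * given ACC_* bit; the code below uses it to precompute mmu->permissions[],
 * a table indexed by page-fault error code.
 */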
5087 bool cr4_smep = is_cr4_smep(mmu);
5088 bool cr4_smap = is_cr4_smap(mmu);
5089 bool cr0_wp = is_cr0_wp(mmu);
5090 bool efer_nx = is_efer_nx(mmu);
5092 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
5093 unsigned pfec = byte << 1;
5101 u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
5103 u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
5105 u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
5113 u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
5121 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
5125 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
5144 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
5147 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
5182 if (!is_cr4_pke(mmu))
5185 wp = is_cr0_wp(mmu);
5187 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
5188 unsigned pfec, pkey_bits;
5189 bool check_pkey, check_write, ff, uf, wf, pte_user;
5192 ff = pfec & PFERR_FETCH_MASK;
5193 uf = pfec & PFERR_USER_MASK;
5194 wf = pfec & PFERR_WRITE_MASK;
5197 pte_user = pfec & PFERR_RSVD_MASK;
5203 check_pkey = (!ff && pte_user);
5208 check_write = check_pkey && wf && (uf || wp);
5211 pkey_bits = !!check_pkey;
5213 pkey_bits |= (!!check_write) << 1;
5215 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
5220 struct kvm_mmu *mmu)
5232 context->page_fault = paging64_page_fault;
5233 context->gva_to_gpa = paging64_gva_to_gpa;
5234 context->sync_spte = paging64_sync_spte;
5239 context->page_fault = paging32_page_fault;
5240 context->gva_to_gpa = paging32_gva_to_gpa;
5241 context->sync_spte = paging32_sync_spte;
5247 union kvm_cpu_role role = {0};
5250 role.base.smm = is_smm(vcpu);
5254 if (!____is_cr0_pg(regs)) {
5255 role.base.direct = 1;
5259 role.base.efer_nx = ____is_efer_nx(regs);
5260 role.base.cr0_wp = ____is_cr0_wp(regs);
5261 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5262 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5263 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5265 if (____is_efer_lma(regs))
5268 else if (____is_cr4_pae(regs))
5273 role.ext.cr4_smep = ____is_cr4_smep(regs);
5274 role.ext.cr4_smap = ____is_cr4_smap(regs);
5275 role.ext.cr4_pse = ____is_cr4_pse(regs);
5278 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5279 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5280 role.ext.efer_lma = ____is_efer_lma(regs);
5285 struct kvm_mmu *mmu)
5292 if (is_cr0_wp(mmu) == cr0_wp)
5295 mmu->cpu_role.base.cr0_wp = cr0_wp;
5303 return tdp_root_level;
5309 return max_tdp_level;
5312 static union kvm_mmu_page_role
5314 union kvm_cpu_role cpu_role)
5316 union kvm_mmu_page_role role = {0};
5320 role.efer_nx = true;
5321 role.smm = cpu_role.base.smm;
5322 role.guest_mode = cpu_role.base.guest_mode;
5326 role.has_4_byte_gpte = false;
5332 union kvm_cpu_role cpu_role)
5334 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5337 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5338 root_role.word == context->root_role.word)
5341 context->cpu_role.as_u64 = cpu_role.as_u64;
5342 context->root_role.word = root_role.word;
5344 context->sync_spte = NULL;
5352 context->gva_to_gpa = paging64_gva_to_gpa;
5354 context->gva_to_gpa = paging32_gva_to_gpa;
5361 union kvm_cpu_role cpu_role,
5362 union kvm_mmu_page_role root_role)
5364 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5365 root_role.word == context->root_role.word)
5368 context->cpu_role.as_u64 = cpu_role.as_u64;
5369 context->root_role.word = root_role.word;
5383 union kvm_cpu_role cpu_role)
5385 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5386 union kvm_mmu_page_role root_role;
5388 root_role = cpu_role.base;
5402 root_role.efer_nx = true;
5408 unsigned long cr4, u64 efer, gpa_t nested_cr3)
5410 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5413 .cr4 = cr4 & ~X86_CR4_PKE,
5417 union kvm_mmu_page_role root_role;
5420 WARN_ON_ONCE(cpu_role.base.direct);
5422 root_role = cpu_role.base;
5426 root_role.passthrough = 1;
5433 static union kvm_cpu_role
5435 bool execonly, u8 level)
5437 union kvm_cpu_role role = {0};
5443 WARN_ON_ONCE(is_smm(vcpu));
5444 role.base.level = level;
5445 role.base.has_4_byte_gpte = false;
5446 role.base.direct = false;
5447 role.base.ad_disabled = !accessed_dirty;
5448 role.base.guest_mode = true;
5452 role.ext.execonly = execonly;
5459 int huge_page_level, bool accessed_dirty,
5462 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5463 u8 level = vmx_eptp_page_walk_level(new_eptp);
5464 union kvm_cpu_role new_mode =
5468 if (new_mode.as_u64 != context->cpu_role.as_u64) {
5470 context->cpu_role.as_u64 = new_mode.as_u64;
5471 context->root_role.word = new_mode.base.word;
5473 context->page_fault = ept_page_fault;
5474 context->gva_to_gpa = ept_gva_to_gpa;
5475 context->sync_spte = ept_sync_spte;
5478 context->pkru_mask = 0;
5488 union kvm_cpu_role cpu_role)
5490 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5500 union kvm_cpu_role new_mode)
5502 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5504 if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5507 g_context->cpu_role.as_u64 = new_mode.as_u64;
5516 g_context->sync_spte = NULL;
5529 g_context->gva_to_gpa = paging64_gva_to_gpa;
5531 g_context->gva_to_gpa = paging64_gva_to_gpa;
5533 g_context->gva_to_gpa = paging32_gva_to_gpa;
5566 vcpu->arch.root_mmu.root_role.word = 0;
5567 vcpu->arch.guest_mmu.root_role.word = 0;
5568 vcpu->arch.nested_mmu.root_role.word = 0;
5569 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5570 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5571 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5598 if (vcpu->arch.mmu->root_role.direct)
5616 static_call(kvm_x86_flush_tlb_current)(vcpu);
5623 struct kvm *kvm = vcpu->kvm;
5626 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5628 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5636 if (!VALID_PAGE(root_hpa))
5661 unsigned long roots_to_free = 0;
5665 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5667 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5669 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5693 if (is_pae(vcpu) && *bytes == 4) {
5699 if (*bytes == 4 || *bytes == 8) {
5718 if (sp->role.level == PG_LEVEL_4K)
5732 unsigned offset, pte_size, misaligned;
5734 offset = offset_in_page(gpa);
5735 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5741 if (!(offset & (pte_size - 1)) && bytes == 1)
5744 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5745 misaligned |= bytes < 4;
5752 unsigned page_offset, quadrant;
5756 page_offset = offset_in_page(gpa);
5757 level = sp->role.level;
5759 if (sp->role.has_4_byte_gpte) {
5771 quadrant = page_offset >> PAGE_SHIFT;
5772 page_offset &= ~PAGE_MASK;
5773 if (quadrant != sp->role.quadrant)
5777 spte = &sp->spt[page_offset / sizeof(*spte)];
5784 gfn_t gfn = gpa >> PAGE_SHIFT;
5787 u64 entry, gentry, *spte;
5795 if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5798 write_lock(&vcpu->kvm->mmu_lock);
5802 ++vcpu->kvm->stat.mmu_pte_write;
5808 ++vcpu->kvm->stat.mmu_flooded;
5819 if (gentry && sp->role.level != PG_LEVEL_4K)
5820 ++vcpu->kvm->stat.mmu_pde_zapped;
5827 write_unlock(&vcpu->kvm->mmu_lock);
5831 void *insn, int insn_len)
5833 int r, emulation_type = EMULTYPE_PF;
5834 bool direct = vcpu->arch.mmu->root_role.direct;
5844 if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS))
5845 error_code &= ~PFERR_IMPLICIT_ACCESS;
5847 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5851 if (unlikely(error_code & PFERR_RSVD_MASK)) {
5859 lower_32_bits(error_code), false,
5877 if (vcpu->arch.mmu->root_role.direct &&
5878 (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5895 emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5903 u64 addr, hpa_t root_hpa)
5914 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
5917 if (!VALID_PAGE(root_hpa))
5920 write_lock(&vcpu->kvm->mmu_lock);
5936 write_unlock(&vcpu->kvm->mmu_lock);
5940 u64 addr, unsigned long roots)
5944 WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
5947 if (mmu != &vcpu->arch.guest_mmu) {
5952 static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
5955 if (!mmu->sync_spte)
5958 if (roots & KVM_MMU_ROOT_CURRENT)
5961 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5962 if (roots & KVM_MMU_ROOT_PREVIOUS(i))
5981 ++vcpu->stat.invlpg;
5988 struct kvm_mmu *mmu = vcpu->arch.mmu;
5989 unsigned long roots = 0;
5993 roots |= KVM_MMU_ROOT_CURRENT;
5995 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5996 if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5998 roots |= KVM_MMU_ROOT_PREVIOUS(i);
6003 ++vcpu->stat.invlpg;
6013 int tdp_max_root_level, int tdp_huge_page_level)
6016 tdp_root_level = tdp_forced_root_level;
6017 max_tdp_level = tdp_max_root_level;
6019 #ifdef CONFIG_X86_64
6030 max_huge_page_level = tdp_huge_page_level;
6031 else if (boot_cpu_has(X86_FEATURE_GBPAGES))
6032 max_huge_page_level = PG_LEVEL_1G;
6034 max_huge_page_level = PG_LEVEL_2M;
6040 struct kvm_rmap_head *rmap_head,
6041 const struct kvm_memory_slot *slot);
6044 const struct kvm_memory_slot *slot,
6046 int start_level, int end_level,
6047 gfn_t start_gfn, gfn_t end_gfn,
6048 bool flush_on_yield, bool flush)
6052 lockdep_assert_held_write(&kvm->mmu_lock);
6057 flush |= fn(kvm, iterator.rmap, slot);
6059 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6060 if (flush && flush_on_yield) {
6065 cond_resched_rwlock_write(&kvm->mmu_lock);
6073 const struct kvm_memory_slot *slot,
6076 bool flush_on_yield)
6080 flush_on_yield, false);
6084 const struct kvm_memory_slot *slot,
6086 bool flush_on_yield)
6094 set_memory_encrypted((unsigned long)mmu->pae_root, 1);
6095 free_page((unsigned long)mmu->pae_root);
6096 free_page((unsigned long)mmu->pml4_root);
6097 free_page((unsigned long)mmu->pml5_root);
6105 mmu->root.hpa = INVALID_PAGE;
6107 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
6108 mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
6128 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
6132 mmu->pae_root = page_address(page);
6143 set_memory_decrypted((unsigned long)mmu->pae_root, 1);
6147 for (i = 0; i < 4; ++i)
6158 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6161 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6163 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6165 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6166 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6174 goto fail_allocate_root;
6182 #define BATCH_ZAP_PAGES 10
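/*
 * kvm_zap_obsolete_pages() below walks active_mmu_pages in reverse and zaps
 * shadow pages whose mmu_valid_gen is stale, yielding mmu_lock at most once
 * per BATCH_ZAP_PAGES zaps when there is contention.
 */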
6186 int nr_zapped, batch = 0;
6190 list_for_each_entry_safe_reverse(sp, node,
6191 &kvm->arch.active_mmu_pages, link) {
6204 if (WARN_ON_ONCE(sp->role.invalid))
6214 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6220 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
6250 lockdep_assert_held(&kvm->slots_lock);
6252 write_lock(&kvm->mmu_lock);
6253 trace_kvm_mmu_zap_all_fast(kvm);
6262 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6285 write_unlock(&kvm->mmu_lock);
6301 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
6306 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6307 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6308 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6309 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6315 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6317 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6320 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6325 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6326 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6327 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6340 const struct kvm_memory_slot *memslot;
6341 struct kvm_memslots *slots;
6342 struct kvm_memslot_iter iter;
6350 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
6351 slots = __kvm_memslots(kvm, i);
6353 kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
6354 memslot = iter.slot;
6355 start = max(gfn_start, memslot->base_gfn);
6356 end = min(gfn_end, memslot->base_gfn + memslot->npages);
6357 if (WARN_ON_ONCE(start >= end))
6361 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
6362 start, end - 1, true, flush);
6377 if (WARN_ON_ONCE(gfn_end <= gfn_start))
6380 write_lock(&kvm->mmu_lock);
6382 kvm_mmu_invalidate_begin(kvm);
6384 kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
6394 kvm_mmu_invalidate_end(kvm);
6396 write_unlock(&kvm->mmu_lock);
6400 struct kvm_rmap_head *rmap_head,
6401 const struct kvm_memory_slot *slot)
6407 const struct kvm_memory_slot *memslot,
6411 write_lock(&kvm->mmu_lock);
6413 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
6414 write_unlock(&kvm->mmu_lock);
6418 read_lock(&kvm->mmu_lock);
6420 read_unlock(&kvm->mmu_lock);
6424 static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
6426 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
6431 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6439 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6440 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6441 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6459 const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
6460 KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
6463 lockdep_assert_held(&kvm->slots_lock);
6465 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6466 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
6470 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6474 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6481 union kvm_mmu_page_role role;
6482 unsigned int access;
6505 const struct kvm_memory_slot *slot,
6509 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6510 u64 huge_spte = READ_ONCE(*huge_sptep);
6520 sptep = &sp->spt[index];
6551 const struct kvm_memory_slot *slot,
6561 level = huge_sp->role.level;
6570 write_unlock(&kvm->mmu_lock);
6578 write_lock(&kvm->mmu_lock);
6585 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6590 struct kvm_rmap_head *rmap_head,
6591 const struct kvm_memory_slot *slot)
6603 if (WARN_ON_ONCE(!sp->role.guest_mode))
6611 if (WARN_ON_ONCE(sp->unsync))
6615 if (sp->role.invalid)
6625 if (!r || r == -EAGAIN)
6636 const struct kvm_memory_slot *slot,
6637 gfn_t start, gfn_t end,
6648 for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
6650 level, level, start, end - 1, true, false);
6655 const struct kvm_memory_slot *memslot,
6674 const struct kvm_memory_slot *memslot,
6677 u64 start = memslot->base_gfn;
6678 u64 end = start + memslot->npages;
6684 write_lock(&kvm->mmu_lock);
6686 write_unlock(&kvm->mmu_lock);
6689 read_lock(&kvm->mmu_lock);
6691 read_unlock(&kvm->mmu_lock);
6705 struct kvm_rmap_head *rmap_head,
6706 const struct kvm_memory_slot *slot)
6710 int need_tlb_flush = 0;
6724 if (sp->role.direct &&
6738 return need_tlb_flush;
6742 const struct kvm_memory_slot *slot)
6749 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
6754 const struct kvm_memory_slot *slot)
6757 write_lock(&kvm->mmu_lock);
6759 write_unlock(&kvm->mmu_lock);
6763 read_lock(&kvm->mmu_lock);
6765 read_unlock(&kvm->mmu_lock);
6770 const struct kvm_memory_slot *memslot)
6773 write_lock(&kvm->mmu_lock);
6779 write_unlock(&kvm->mmu_lock);
6783 read_lock(&kvm->mmu_lock);
6785 read_unlock(&kvm->mmu_lock);
6804 write_lock(&kvm->mmu_lock);
6806 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6807 if (WARN_ON_ONCE(sp->role.invalid))
6811 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6820 write_unlock(&kvm->mmu_lock);
6829 struct kvm_memory_slot *slot)
6836 WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
6847 gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
6853 if (unlikely(gen == 0)) {
6854 kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n");
6860 struct shrink_control *sc)
6863 int nr_to_scan = sc->nr_to_scan;
6864 unsigned long freed = 0;
6866 mutex_lock(&kvm_lock);
6868 list_for_each_entry(kvm, &vm_list, vm_list) {
6886 if (!kvm->arch.n_used_mmu_pages &&
6890 idx = srcu_read_lock(&kvm->srcu);
6891 write_lock(&kvm->mmu_lock);
6895 &kvm->arch.zapped_obsolete_pages);
6902 write_unlock(&kvm->mmu_lock);
6903 srcu_read_unlock(&kvm->srcu, idx);
6910 list_move_tail(&kvm->vm_list, &vm_list);
6914 mutex_unlock(&kvm_lock);
6919 struct shrink_control *sc)
6935 return sysfs_emit(buffer, "never\n");
6937 return param_get_bool(buffer, kp);
6943 return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
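/*
 * set_nx_huge_pages() below parses the nx_huge_pages module parameter:
 * "off"/"force"/"auto" select the mitigation mode, "never" opts out entirely
 * (only while no VMs exist), and a plain bool toggles the current setting,
 * updating every existing VM when the effective value changes.
 */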
6960 if (sysfs_streq(val, "off")) {
6962 } else if (sysfs_streq(val, "force")) {
6964 } else if (sysfs_streq(val, "auto")) {
6966 } else if (sysfs_streq(val, "never")) {
6969 mutex_lock(&kvm_lock);
6970 if (!list_empty(&vm_list)) {
6971 mutex_unlock(&kvm_lock);
6975 mutex_unlock(&kvm_lock);
6976 } else if (kstrtobool(val, &new_val) < 0) {
6982 if (new_val != old_val) {
6985 mutex_lock(&kvm_lock);
6987 list_for_each_entry(kvm, &vm_list, vm_list) {
6988 mutex_lock(&kvm->slots_lock);
6990 mutex_unlock(&kvm->slots_lock);
6992 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
6994 mutex_unlock(&kvm_lock);
7036 BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
7037 BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
7038 BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));
7044 0, SLAB_ACCOUNT, NULL);
7050 0, SLAB_ACCOUNT, NULL);
7104 if (!enabled || !ratio)
7110 ratio = min(ratio, 3600u);
7111 *period = 60 * 60 * 1000 / ratio;
7118 bool was_recovery_enabled, is_recovery_enabled;
7119 uint old_period, new_period;
7127 err = param_set_uint(val, kp);
7133 if (is_recovery_enabled &&
7134 (!was_recovery_enabled || old_period > new_period)) {
7137 mutex_lock(&kvm_lock);
7139 list_for_each_entry(kvm, &vm_list, vm_list)
7140 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7142 mutex_unlock(&kvm_lock);
7150 unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
7151 struct kvm_memory_slot *slot;
7159 rcu_idx = srcu_read_lock(&kvm->srcu);
7160 write_lock(&kvm->mmu_lock);
7170 to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
7171 for ( ; to_zap; --to_zap) {
7172 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7182 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7186 WARN_ON_ONCE(!sp->role.direct);
7209 if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
7210 struct kvm_memslots *slots;
7212 slots = kvm_memslots_for_spte_role(kvm, sp->role);
7213 slot = __gfn_to_memslot(slots, sp->gfn);
7214 WARN_ON_ONCE(!slot);
7217 if (slot && kvm_slot_dirty_track_enabled(slot))
7225 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7229 cond_resched_rwlock_write(&kvm->mmu_lock);
7239 write_unlock(&kvm->mmu_lock);
7240 srcu_read_unlock(&kvm->srcu, rcu_idx);
7250 return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
7251 : MAX_SCHEDULE_TIMEOUT;
7257 long remaining_time;
7260 start_time = get_jiffies_64();
7263 set_current_state(TASK_INTERRUPTIBLE);
7264 while (!kthread_should_stop() && remaining_time > 0) {
7265 schedule_timeout(remaining_time);
7267 set_current_state(TASK_INTERRUPTIBLE);
7270 set_current_state(TASK_RUNNING);
7272 if (kthread_should_stop())
7287 "kvm-nx-lpage-recovery",
7288 &kvm->arch.nx_huge_page_recovery_thread);
7290 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7297 if (kvm->arch.nx_huge_page_recovery_thread)
7298 kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
7301 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
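/*
 * The CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES code below tracks, per memslot and
 * per huge-page level, whether a huge-page-sized range has mixed
 * private/shared attributes (KVM_LPAGE_MIXED_FLAG), so such ranges are never
 * mapped with a huge page.
 */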
7302 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
7303 struct kvm_gfn_range *range)
7316 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7322 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7328 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7334 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7340 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
7341 gfn_t gfn, int level, unsigned long attrs)
7343 const unsigned long start = gfn;
7344 const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
7346 if (level == PG_LEVEL_2M)
7347 return kvm_range_has_memory_attributes(kvm, start, end, attrs);
7349 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7350 if (hugepage_test_mixed(slot, gfn, level - 1) ||
7351 attrs != kvm_get_memory_attributes(kvm, gfn))
7357 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
7358 struct kvm_gfn_range *range)
7360 unsigned long attrs = range->arg.attributes;
7361 struct kvm_memory_slot *slot = range->slot;
7364 lockdep_assert_held_write(&kvm->mmu_lock);
7365 lockdep_assert_held(&kvm->slots_lock);
7373 if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
7380 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7381 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7385 if (gfn != range->start || gfn + nr_pages > range->end) {
7391 if (gfn >= slot->base_gfn &&
7392 gfn + nr_pages <= slot->base_gfn + slot->npages) {
7393 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7394 hugepage_clear_mixed(slot, gfn, level);
7396 hugepage_set_mixed(slot, gfn, level);
7405 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7406 hugepage_clear_mixed(slot, gfn, level);
7413 if (gfn < range->end &&
7414 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7415 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7416 hugepage_clear_mixed(slot, gfn, level);
7418 hugepage_set_mixed(slot, gfn, level);
7424 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
7425 struct kvm_memory_slot *slot)
7429 if (!kvm_arch_has_private_mem(kvm))
7432 for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
7440 gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
7443 if (start < slot->base_gfn)
7450 for (gfn = start; gfn < end; gfn += nr_pages) {
7451 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7453 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7454 hugepage_clear_mixed(slot, gfn, level);
7456 hugepage_set_mixed(slot, gfn, level);
static struct kvm_mmu_page * __kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role)
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn)
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level)
static int is_cpuid_PSE36(void)
static union kvm_cpu_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly, u8 level)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t vaddr, u64 access, struct x86_exception *exception)
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot)
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, rmap_handler_t handler)
static u64 reserved_hpa_bits(void)
static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep)
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static void rmap_remove(struct kvm *kvm, u64 *spte)
static int __mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec)
static __always_inline bool walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, bool flush_on_yield)
static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
static u64 mmu_spte_get_lockless(u64 *sptep)
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
static int mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent, bool can_yield)
static u64 * rmap_get_next(struct rmap_iterator *iter)
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_mmu_vendor_module_exit(void)
static void mmu_free_vm_memory_caches(struct kvm *kvm)
#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, _start_gfn, _end_gfn, _iter_)
static bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, union kvm_mmu_page_role role)
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static u64 __get_spte_lockless(u64 *sptep)
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access)
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
struct kmem_cache * mmu_page_header_cache
static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static void init_kvm_softmmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
static bool detect_write_flooding(struct kvm_mmu_page *sp)
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
static int __kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level, bool is_private)
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len)
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head, bool pt_protect)
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
void __init kvm_mmu_x86_module_init(void)
static void drop_spte(struct kvm *kvm, u64 *sptep)
static uint __read_mostly nx_huge_pages_recovery_period_ms
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
static void __set_nx_huge_pages(bool val)
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
static unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
int kvm_mmu_load(struct kvm_vcpu *vcpu)
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr)
static void __link_shadow_page(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, u64 *sptep, struct kvm_mmu_page *sp, bool flush)
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role)
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len)
static int max_huge_page_level __read_mostly
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level)
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void count_spte_clear(u64 *sptep, u64 spte)
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list, bool remote_flush)
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, struct list_head *invalid_list)
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
#define for_each_shadow_entry(_vcpu, _addr, _walker)
static const struct kernel_param_ops nx_huge_pages_ops
static bool spte_clear_dirty(u64 *sptep)
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)
static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
static bool get_nx_auto_mode(void)
static u8 kvm_max_level_for_order(int order)
static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access)
static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp)
static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, u64 start, u64 end, int target_level)
static struct kvm_rmap_head * gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot)
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
static void kvm_mmu_zap_all(struct kvm *kvm)
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
static struct kmem_cache * pte_list_desc_cache
static struct kvm_mmu_page * kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly, int huge_page_level)
static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec)
static void pte_list_desc_remove_entry(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc, int i)
static void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots)
static bool mmu_spte_age(u64 *sptep)
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
static void mmu_destroy_caches(void)
static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, u64 new_spte)
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
static void paging32_init_context(struct kvm_mmu *context)
static uint __read_mostly nx_huge_pages_recovery_ratio
static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
static bool kvm_available_flush_remote_tlbs_range(void)
static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static unsigned get_mmio_spte_access(u64 spte)
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
typedef bool (*slot_rmaps_handler)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void kvm_recover_nx_huge_pages(struct kvm *kvm)
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, int bytes)
static void mmu_spte_clear_no_track(u64 *sptep)
static bool is_unsync_root(hpa_t root)
static bool sp_has_gptes(struct kvm_mmu_page *sp)
static int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, int i)
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level)
void kvm_init_mmu(struct kvm_vcpu *vcpu)
bool itlb_multihit_kvm_mitigation
static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t pte)
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role new_mode)
static bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
static struct kvm_memory_slot * gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
#define KVM_LPAGE_MIXED_FLAG
static void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, u64 *sptep)
static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault)
static unsigned kvm_page_table_hashfn(gfn_t gfn)
static void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static struct shrinker * mmu_shrinker
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots)
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, bool execonly, int huge_page_level)
static unsigned long kvm_mmu_available_pages(struct kvm *kvm)
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, int tdp_max_root_level, int tdp_huge_page_level)
static bool calc_nx_huge_pages_recovery_period(uint *period)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, unsigned int access)
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void mmu_spte_set(u64 *sptep, u64 spte)
static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)
static u64 * get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
static struct kvm_mmu_page * kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, bool direct, unsigned int access)
static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache, struct kvm_mmu_page *sp, u64 *parent_pte)
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, int *bytes)
static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, struct kvm_rmap_head *rmap_head)
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
static void update_pkru_bitmask(struct kvm_mmu *mmu)
static void pte_list_remove(struct kvm *kvm, u64 *spte, struct kvm_rmap_head *rmap_head)
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end)
static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
static bool is_cr4_pae(struct kvm_mmu *mmu)
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, unsigned int access)
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
void kvm_mmu_init_vm(struct kvm *kvm)
static void __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)
static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, unsigned int access)
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free)
static void paging64_init_context(struct kvm_mmu *context)
static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list)
int kvm_mmu_create(struct kvm_vcpu *vcpu)
static int kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, gfn_t gfn, int count)
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644)
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, union kvm_cpu_role cpu_role, union kvm_mmu_page_role root_role)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp)
static void shadow_mmu_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep)
static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static const struct kernel_param_ops nx_huge_pages_recovery_param_ops
int kvm_mmu_vendor_module_init(void)
static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
static void free_mmu_pages(struct kvm_mmu *mmu)
#define RMAP_RECYCLE_THRESHOLD
static u64 * rmap_get_first(struct kvm_rmap_head *rmap_head, struct rmap_iterator *iter)
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool pse, bool amd)
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, int idx)
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot)
#define KVM_PAGE_ARRAY_NR
static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role)
static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, const struct kvm_memory_slot *slot, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn)
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
static bool is_cr0_pg(struct kvm_mmu *mmu)
static void kvm_zap_obsolete_pages(struct kvm *kvm)
#define for_each_sp(pvec, sp, parents, i)
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte)
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte)
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access)
static bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static bool nx_hugepage_mitigation_hard_disabled
static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, bool nx_huge_page_possible)
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes)
static bool __ro_after_init tdp_mmu_allowed
static int mmu_first_shadow_root_alloc(struct kvm *kvm)
static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, bool flush_on_yield)
static void clear_sp_write_flooding_count(u64 *spte)
static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level)
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
static long get_nx_huge_page_recovery_timeout(u64 start_time)
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
static bool __read_mostly force_flush_and_sync_on_reuse
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t pte)
static u64 * fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
static struct percpu_counter kvm_total_used_mmu_pages
static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
static unsigned long mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static bool boot_cpu_is_amd(void)
static void mark_unsync(u64 *spte)
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
#define BYTE_MASK(access)
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, unsigned long nr_to_zap)
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
static void __set_spte(u64 *sptep, u64 spte)
static int topup_split_caches(struct kvm *kvm)
static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch)
static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *parent, struct list_head *invalid_list)
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte)
int __read_mostly nx_huge_pages
static bool spte_write_protect(u64 *sptep, bool pt_protect)
int kvm_cpu_dirty_log_size(void)
static bool need_topup_split_caches_or_resched(struct kvm *kvm)
__MODULE_PARM_TYPE(nx_huge_pages, "bool")
static void nonpaging_init_context(struct kvm_mmu *context)
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
static bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
static __always_inline u64 rsvd_bits(int s, int e)
static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
#define KVM_MMU_CR0_ROLE_BITS
static unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
static gfn_t kvm_mmu_max_gfn(void)
static bool kvm_shadow_root_allocated(struct kvm *kvm)
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
static gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gpa, u64 access, struct x86_exception *exception)
static void kvm_update_page_stats(struct kvm *kvm, int level, int count)
static int kvm_mmu_reload(struct kvm_vcpu *vcpu)
#define PT_WRITABLE_SHIFT
static bool kvm_memslots_have_rmaps(struct kvm *kvm)
static unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
static void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
#define KVM_MMU_CR4_ROLE_BITS
static hpa_t kvm_mmu_get_dummy_root(void)
#define IS_VALID_PAE_ROOT(x)
#define __PT_BASE_ADDR_MASK
static bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
#define KVM_MMU_WARN_ON(x)
static gfn_t gfn_round_for_level(gfn_t gfn, int level)
static void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
static int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err, bool prefetch, int *emulation_type)
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num)
int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
bool kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn)
void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn)
void __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn)
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
static bool is_smm(struct kvm_vcpu *vcpu)
u64 __read_mostly shadow_accessed_mask
bool spte_has_volatile_bits(u64 spte)
u64 __read_mostly shadow_me_value
u64 __read_mostly shadow_nonpresent_or_rsvd_mask
u8 __read_mostly shadow_phys_bits
u64 __read_mostly shadow_dirty_mask
u64 __read_mostly shadow_mmio_access_mask
u64 __read_mostly shadow_memtype_mask
u64 __read_mostly shadow_me_mask
u64 mark_spte_for_access_track(u64 spte)
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
void __init kvm_mmu_spte_module_init(void)
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte)
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
bool __read_mostly enable_mmio_caching
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask
void kvm_mmu_reset_all_pte_masks(void)
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index)
u64 __read_mostly shadow_x_mask
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
u64 __read_mostly shadow_mmu_writable_mask
static bool is_dirty_spte(u64 spte)
static bool is_accessed_spte(u64 spte)
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
static u64 get_mmio_spte_generation(u64 spte)
static bool is_mmio_spte(u64 spte)
static void check_spte_writable_invariants(u64 spte)
static bool is_access_track_spte(u64 spte)
static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level)
static bool is_writable_pte(unsigned long pte)
static struct kvm_mmu_page * sptep_to_sp(u64 *sptep)
static bool is_executable_pte(u64 spte)
#define SPTE_ENT_PER_PAGE
static bool spte_ad_need_write_protect(u64 spte)
static bool is_last_spte(u64 pte, int level)
static bool is_shadow_present_pte(u64 pte)
static kvm_pfn_t spte_to_pfn(u64 pte)
static bool spte_ad_enabled(u64 spte)
#define SPTE_BASE_ADDR_MASK
static struct kvm_mmu_page * root_to_sp(hpa_t root)
static u64 restore_acc_track_spte(u64 spte)
static bool kvm_ad_enabled(void)
static u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
#define SPTE_INDEX(address, level)
#define MMIO_SPTE_GEN_MASK
static bool is_large_pte(u64 pte)
static bool is_mmu_writable_spte(u64 spte)
static struct kvm_mmu_page * spte_to_child_sp(u64 spte)
static bool sp_ad_disabled(struct kvm_mmu_page *sp)
static int spte_index(u64 *sptep)
unsigned int unsync_children
struct hlist_node hash_link
bool nx_huge_page_disallowed
struct list_head possible_nx_huge_page_link
union kvm_mmu_page_role role
u64 * shadowed_translation
struct kvm_rmap_head parent_ptes
atomic_t write_flooding_count
struct kvm_mmu_pages::mmu_page_and_offset page[KVM_PAGE_ARRAY_NR]
bool huge_page_disallowed
const bool nx_huge_page_workaround_enabled
struct kvm_memory_slot * slot
struct kvm_mmu_page * parent[PT64_ROOT_MAX_LEVEL]
unsigned int idx[PT64_ROOT_MAX_LEVEL]
struct pte_list_desc * more
u64 * sptes[PTE_LIST_EXT]
struct pte_list_desc * desc
struct kvm_mmu_memory_cache * shadow_page_cache
struct kvm_mmu_memory_cache * page_header_cache
struct kvm_mmu_memory_cache * shadowed_info_cache
const struct kvm_memory_slot * slot
struct kvm_rmap_head * end_rmap
struct kvm_rmap_head * rmap
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level)
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level, bool shared)
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, const struct kvm_memory_slot *slot)
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush)
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
u64 * kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte)
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot)
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
static bool is_tdp_mmu_page(struct kvm_mmu_page *sp)
static void kvm_tdp_mmu_walk_lockless_begin(void)
static void kvm_tdp_mmu_walk_lockless_end(void)
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
bool __read_mostly eager_page_split
bool kvm_apicv_activated(struct kvm *kvm)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int emulation_type, void *insn, int insn_len)
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception)
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
static void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
static bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
static bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
static bool is_pse(struct kvm_vcpu *vcpu)
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
static bool is_paging(struct kvm_vcpu *vcpu)
static bool is_long_mode(struct kvm_vcpu *vcpu)
static void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)
static bool is_pae(struct kvm_vcpu *vcpu)
static bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
static bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)