mmu_internal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level)	(1 << (bits_per_level))

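/*
 * Illustrative expansion (not part of the original header): with 64-bit
 * paging, i.e. 9 bits per level and PAGE_SHIFT == 12, the builders yield
 * the following for a level-2 (2MiB) entry; the address is hypothetical.
 *
 *	__PT_LEVEL_SHIFT(2, 9)					== 21
 *	__PT_INDEX(0x7f1234567000ull, 2, 9)			== 0x1a2 (bits 29:21)
 *	__PT_LVL_ADDR_MASK(__PT_BASE_ADDR_MASK, 2, 9)		== GENMASK_ULL(51, 21)
 *	__PT_LVL_OFFSET_MASK(__PT_BASE_ADDR_MASK, 2, 9)		== GENMASK_ULL(20, 12)
 *	__PT_ENT_PER_PAGE(9)					== 512
 */
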
/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

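/*
 * Usage sketch (illustrative only, not from this header): callers invalidate
 * a PAE root by storing 0 and test validity with the macro, e.g.:
 *
 *	mmu->pae_root[i] = INVALID_PAE_ROOT;
 *	...
 *	if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
 *		continue;
 */
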
static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

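/*
 * A note on the pair above (author's sketch, not in the header): the dummy
 * root is backed by the kernel's shared zero page, so the helpers round-trip:
 *
 *	KVM_MMU_WARN_ON(!kvm_mmu_is_dummy_root(kvm_mmu_get_dummy_root()));
 */
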
typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging).  In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits.  Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

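/*
 * Minimal sketch (hypothetical helpers, not this header's API) of the
 * shadowed_translation encoding documented above: the shadowed GFN occupies
 * the upper bits and the KVM-format access mask the low PAGE_SHIFT bits.
 */
static inline u64 example_pack_shadowed(gfn_t gfn, unsigned int access)
{
	return ((u64)gfn << PAGE_SHIFT) | access;	/* hypothetical */
}

static inline gfn_t example_unpack_gfn(u64 shadowed)
{
	return shadowed >> PAGE_SHIFT;			/* hypothetical */
}
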
extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}

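/*
 * Worked example (hypothetical values): KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is
 * 512, so "gfn & -512" clears the low nine bits of the gfn, e.g.:
 *
 *	gfn_round_for_level(0x12345, PG_LEVEL_2M) == 0x12200
 */
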
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool is_private;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

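/*
 * Relationship sketch (author's inference, not stated in the header): the
 * three level fields above only ever narrow, i.e. once
 * kvm_mmu_hugepage_adjust() and disallowed_hugepage_adjust() have run:
 *
 *	PG_LEVEL_4K <= goal_level <= req_level <= max_level
 */
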
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax; JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

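/*
 * Illustrative caller pattern (a sketch, modeled on the mmu.c fault paths,
 * not code from this file) enabled by RET_PF_CONTINUE being 0: one
 * test-and-branch filters out every "stop" value at once.
 *
 *	r = fast_page_fault(vcpu, fault);
 *	if (r != RET_PF_CONTINUE)
 *		return r;
 */
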
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch, int *emulation_type)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
	 * guest perspective and have already been counted at the time of the
	 * original fault.
	 */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	/*
	 * Similar to above, prefetch faults aren't truly spurious, and the
	 * async #PF path doesn't do emulation.  Do count faults that are fixed
	 * by the async #PF handler though, otherwise they'll never be counted.
	 */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;
	return r;
}

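/*
 * Simplified call-site sketch (an assumption about the mmu.c caller, shown
 * only for orientation; the emulation_type handling is abbreviated):
 *
 *	int emulation_type = EMULTYPE_PF;
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, lower_32_bits(error_code),
 *				  false, &emulation_type);
 */
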
int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */