pgtable.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
4  * No bombay mix was harmed in the writing of this file.
5  *
6  * Copyright (C) 2020 Google LLC
7  * Author: Will Deacon <will@kernel.org>
8  */
9 
10 #include <linux/bitfield.h>
11 #include <asm/kvm_pgtable.h>
12 #include <asm/stage2_pgtable.h>
13 
14 
15 #define KVM_PTE_TYPE BIT(1)
16 #define KVM_PTE_TYPE_BLOCK 0
17 #define KVM_PTE_TYPE_PAGE 1
18 #define KVM_PTE_TYPE_TABLE 1
19 
20 #define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
21 
22 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
23 #define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6)
24 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \
25  ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
26 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \
27  ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
28 #define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8)
29 #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3
30 #define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10)
31 
32 #define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
33 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6)
34 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7)
35 #define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8)
36 #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3
37 #define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10)
38 
39 #define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50)
40 
41 #define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
42 
43 #define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
44 
45 #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
46 
47 #define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
48 
49 #define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
50  KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
51  KVM_PTE_LEAF_ATTR_HI_S2_XN)
52 
53 #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
54 #define KVM_MAX_OWNER_ID 1
55 
56 /*
57  * Used to indicate a pte for which a 'break-before-make' sequence is in
58  * progress.
59  */
60 #define KVM_INVALID_PTE_LOCKED BIT(10)
61 
62 struct kvm_pgtable_walk_data {
63  struct kvm_pgtable_walker *walker;
64 
65  const u64 start;
66  u64 addr;
67  const u64 end;
68 };
69 
70 static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
71 {
72  return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
73 }
74 
75 static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
76 {
77  return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
78 }
79 
80 static bool kvm_phys_is_valid(u64 phys)
81 {
82  u64 parange_max = kvm_get_parange_max();
83  u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
84 
85  return phys < BIT(shift);
86 }
87 
88 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
89 {
90  u64 granule = kvm_granule_size(ctx->level);
91 
92  if (!kvm_level_supports_block_mapping(ctx->level))
93  return false;
94 
95  if (granule > (ctx->end - ctx->addr))
96  return false;
97 
98  if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
99  return false;
100 
101  return IS_ALIGNED(ctx->addr, granule);
102 }
103 
104 static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
105 {
106  u64 shift = kvm_granule_shift(level);
107  u64 mask = BIT(PAGE_SHIFT - 3) - 1;
108 
109  return (data->addr >> shift) & mask;
110 }
111 
112 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
113 {
114  u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115  u64 mask = BIT(pgt->ia_bits) - 1;
116 
117  return (addr & mask) >> shift;
118 }
119 
120 static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
121 {
122  struct kvm_pgtable pgt = {
123  .ia_bits = ia_bits,
124  .start_level = start_level,
125  };
126 
127  return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
128 }
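
/*
 * A standalone sketch, not part of pgtable.c: the concatenated-PGD sizing
 * above redone as a worked example with a hard-coded 4K granule
 * (PAGE_SHIFT == 12, last level == 3). With ia_bits == 40 and
 * start_level == 1, the "level -1" shift is 39 and two concatenated PGD
 * pages are needed. The function and file names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int demo_pgd_pages(uint32_t ia_bits, int start_level)
{
	const int last_level = 3, page_shift = 12;
	/* kvm_granule_shift(start_level - 1); may reach level -1 by design */
	uint64_t shift = (uint64_t)(last_level - (start_level - 1)) *
			 (page_shift - 3) + page_shift;
	uint64_t mask = (1ULL << ia_bits) - 1;

	return (unsigned int)(((~0ULL & mask) >> shift) + 1);
}

int main(void)
{
	printf("%u\n", demo_pgd_pages(40, 1));	/* prints 2 */
	return 0;
}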
129 
130 static bool kvm_pte_table(kvm_pte_t pte, s8 level)
131 {
132  if (level == KVM_PGTABLE_LAST_LEVEL)
133  return false;
134 
135  if (!kvm_pte_valid(pte))
136  return false;
137 
138  return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
139 }
140 
141 static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
142 {
143  return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
144 }
145 
146 static void kvm_clear_pte(kvm_pte_t *ptep)
147 {
148  WRITE_ONCE(*ptep, 0);
149 }
150 
151 static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
152 {
153  kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
154 
155  pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
156  pte |= KVM_PTE_VALID;
157  return pte;
158 }
159 
160 static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
161 {
162  kvm_pte_t pte = kvm_phys_to_pte(pa);
163  u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
164  KVM_PTE_TYPE_BLOCK;
165 
166  pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
167  pte |= FIELD_PREP(KVM_PTE_TYPE, type);
168  pte |= KVM_PTE_VALID;
169 
170  return pte;
171 }
172 
173 static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
174 {
175  return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
176 }
177 
178 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
179  const struct kvm_pgtable_visit_ctx *ctx,
180  enum kvm_pgtable_walk_flags visit)
181 {
182  struct kvm_pgtable_walker *walker = data->walker;
183 
184  /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
185  WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
186  return walker->cb(ctx, visit);
187 }
188 
189 static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
190  int r)
191 {
192  /*
193  * Visitor callbacks return EAGAIN when the conditions that led to a
194  * fault are no longer reflected in the page tables due to a race to
195  * update a PTE. In the context of a fault handler this is interpreted
196  * as a signal to retry guest execution.
197  *
198  * Ignore the return code altogether for walkers outside a fault handler
199  * (e.g. write protecting a range of memory) and chug along with the
200  * page table walk.
201  */
202  if (r == -EAGAIN)
203  return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
204 
205  return !r;
206 }
207 
208 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
209  struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);
210 
211 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
212  struct kvm_pgtable_mm_ops *mm_ops,
213  kvm_pteref_t pteref, s8 level)
214 {
215  enum kvm_pgtable_walk_flags flags = data->walker->flags;
216  kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
217  struct kvm_pgtable_visit_ctx ctx = {
218  .ptep = ptep,
219  .old = READ_ONCE(*ptep),
220  .arg = data->walker->arg,
221  .mm_ops = mm_ops,
222  .start = data->start,
223  .addr = data->addr,
224  .end = data->end,
225  .level = level,
226  .flags = flags,
227  };
228  int ret = 0;
229  bool reload = false;
230  kvm_pteref_t childp;
231  bool table = kvm_pte_table(ctx.old, level);
232 
233  if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
234  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
235  reload = true;
236  }
237 
238  if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
239  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
240  reload = true;
241  }
242 
243  /*
244  * Reload the page table after invoking the walker callback for leaf
245  * entries or after pre-order traversal, to allow the walker to descend
246  * into a newly installed or replaced table.
247  */
248  if (reload) {
249  ctx.old = READ_ONCE(*ptep);
250  table = kvm_pte_table(ctx.old, level);
251  }
252 
253  if (!kvm_pgtable_walk_continue(data->walker, ret))
254  goto out;
255 
256  if (!table) {
257  data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
258  data->addr += kvm_granule_size(level);
259  goto out;
260  }
261 
262  childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
263  ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
264  if (!kvm_pgtable_walk_continue(data->walker, ret))
265  goto out;
266 
267  if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
268  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
269 
270 out:
271  if (kvm_pgtable_walk_continue(data->walker, ret))
272  return 0;
273 
274  return ret;
275 }
276 
277 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
278  struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
279 {
280  u32 idx;
281  int ret = 0;
282 
283  if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
284  level > KVM_PGTABLE_LAST_LEVEL))
285  return -EINVAL;
286 
287  for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
288  kvm_pteref_t pteref = &pgtable[idx];
289 
290  if (data->addr >= data->end)
291  break;
292 
293  ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
294  if (ret)
295  break;
296  }
297 
298  return ret;
299 }
300 
301 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
302 {
303  u32 idx;
304  int ret = 0;
305  u64 limit = BIT(pgt->ia_bits);
306 
307  if (data->addr > limit || data->end > limit)
308  return -ERANGE;
309 
310  if (!pgt->pgd)
311  return -EINVAL;
312 
313  for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314  kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
315 
316  ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
317  if (ret)
318  break;
319  }
320 
321  return ret;
322 }
323 
324 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
325  struct kvm_pgtable_walker *walker)
326 {
327  struct kvm_pgtable_walk_data walk_data = {
328  .start = ALIGN_DOWN(addr, PAGE_SIZE),
329  .addr = ALIGN_DOWN(addr, PAGE_SIZE),
330  .end = PAGE_ALIGN(walk_data.addr + size),
331  .walker = walker,
332  };
333  int r;
334 
335  r = kvm_pgtable_walk_begin(walker);
336  if (r)
337  return r;
338 
339  r = _kvm_pgtable_walk(pgt, &walk_data);
340  kvm_pgtable_walk_end(walker);
341 
342  return r;
343 }
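
/*
 * A usage sketch, not part of the original file: a caller-supplied walker
 * that counts valid leaf entries over a range. Only kvm_pgtable_walk(),
 * the visit context and kvm_pte_valid() are real; the two functions below
 * are hypothetical.
 */
static int count_leaves_cb(const struct kvm_pgtable_visit_ctx *ctx,
			   enum kvm_pgtable_walk_flags visit)
{
	u64 *nr_valid = ctx->arg;

	if (kvm_pte_valid(ctx->old))
		(*nr_valid)++;

	return 0;
}

static u64 count_valid_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 nr_valid = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaves_cb,
		.arg	= &nr_valid,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
	return nr_valid;
}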
344 
345 struct leaf_walk_data {
346  kvm_pte_t pte;
347  s8 level;
348 };
349 
350 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
351  enum kvm_pgtable_walk_flags visit)
352 {
353  struct leaf_walk_data *data = ctx->arg;
354 
355  data->pte = ctx->old;
356  data->level = ctx->level;
357 
358  return 0;
359 }
360 
361 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
362  kvm_pte_t *ptep, s8 *level)
363 {
364  struct leaf_walk_data data;
365  struct kvm_pgtable_walker walker = {
366  .cb = leaf_walker,
367  .flags = KVM_PGTABLE_WALK_LEAF,
368  .arg = &data,
369  };
370  int ret;
371 
372  ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
373  PAGE_SIZE, &walker);
374  if (!ret) {
375  if (ptep)
376  *ptep = data.pte;
377  if (level)
378  *level = data.level;
379  }
380 
381  return ret;
382 }
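
/*
 * A usage sketch (hypothetical helper, not part of this file): look up
 * the leaf covering an address and report the granule size backing it,
 * or 0 if the address is unmapped.
 */
static u64 mapped_granule_size(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	s8 level = KVM_PGTABLE_LAST_LEVEL;

	if (kvm_pgtable_get_leaf(pgt, addr, &pte, &level))
		return 0;

	return kvm_pte_valid(pte) ? kvm_granule_size(level) : 0;
}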
383 
384 struct hyp_map_data {
385  const u64 phys;
386  kvm_pte_t attr;
387 };
388 
389 static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
390 {
391  bool device = prot & KVM_PGTABLE_PROT_DEVICE;
392  u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
393  kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
394  u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
395  u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
396  KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
397 
398  if (!(prot & KVM_PGTABLE_PROT_R))
399  return -EINVAL;
400 
401  if (prot & KVM_PGTABLE_PROT_X) {
402  if (prot & KVM_PGTABLE_PROT_W)
403  return -EINVAL;
404 
405  if (device)
406  return -EINVAL;
407 
408  if (system_supports_bti_kernel())
409  attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
410  } else {
411  attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
412  }
413 
414  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
415  if (!kvm_lpa2_is_enabled())
416  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
417  attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
418  attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
419  *ptep = attr;
420 
421  return 0;
422 }
423 
424 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
425 {
426  enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
427  u32 ap;
428 
429  if (!kvm_pte_valid(pte))
430  return prot;
431 
432  if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
433  prot |= KVM_PGTABLE_PROT_X;
434 
435  ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
436  if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
437  prot |= KVM_PGTABLE_PROT_R;
438  else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
439  prot |= KVM_PGTABLE_PROT_RW;
440 
441  return prot;
442 }
443 
444 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
445  struct hyp_map_data *data)
446 {
447  u64 phys = data->phys + (ctx->addr - ctx->start);
448  kvm_pte_t new;
449 
450  if (!kvm_block_mapping_supported(ctx, phys))
451  return false;
452 
453  new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
454  if (ctx->old == new)
455  return true;
456  if (!kvm_pte_valid(ctx->old))
457  ctx->mm_ops->get_page(ctx->ptep);
458  else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
459  return false;
460 
461  smp_store_release(ctx->ptep, new);
462  return true;
463 }
464 
465 static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
466  enum kvm_pgtable_walk_flags visit)
467 {
468  kvm_pte_t *childp, new;
469  struct hyp_map_data *data = ctx->arg;
470  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
471 
472  if (hyp_map_walker_try_leaf(ctx, data))
473  return 0;
474 
475  if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
476  return -EINVAL;
477 
478  childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
479  if (!childp)
480  return -ENOMEM;
481 
482  new = kvm_init_table_pte(childp, mm_ops);
483  mm_ops->get_page(ctx->ptep);
484  smp_store_release(ctx->ptep, new);
485 
486  return 0;
487 }
488 
489 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
490  enum kvm_pgtable_prot prot)
491 {
492  int ret;
493  struct hyp_map_data map_data = {
494  .phys = ALIGN_DOWN(phys, PAGE_SIZE),
495  };
496  struct kvm_pgtable_walker walker = {
497  .cb = hyp_map_walker,
498  .flags = KVM_PGTABLE_WALK_LEAF,
499  .arg = &map_data,
500  };
501 
502  ret = hyp_set_prot_attr(prot, &map_data.attr);
503  if (ret)
504  return ret;
505 
506  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
507  dsb(ishst);
508  isb();
509  return ret;
510 }
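
/*
 * A lifecycle sketch (hypothetical caller, not part of this file): create
 * a hyp stage-1 table over a 48-bit VA space and map one page read/write.
 * Assumes hyp_mm_ops is a populated kvm_pgtable_mm_ops.
 */
static int demo_hyp_map_one_page(struct kvm_pgtable *pgt, u64 va, u64 pa,
				 struct kvm_pgtable_mm_ops *hyp_mm_ops)
{
	int ret = kvm_pgtable_hyp_init(pgt, 48, hyp_mm_ops);

	if (ret)
		return ret;

	return kvm_pgtable_hyp_map(pgt, va, PAGE_SIZE, pa,
				   KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
}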
511 
512 static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
513  enum kvm_pgtable_walk_flags visit)
514 {
515  kvm_pte_t *childp = NULL;
516  u64 granule = kvm_granule_size(ctx->level);
517  u64 *unmapped = ctx->arg;
518  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
519 
520  if (!kvm_pte_valid(ctx->old))
521  return -EINVAL;
522 
523  if (kvm_pte_table(ctx->old, ctx->level)) {
524  childp = kvm_pte_follow(ctx->old, mm_ops);
525 
526  if (mm_ops->page_count(childp) != 1)
527  return 0;
528 
529  kvm_clear_pte(ctx->ptep);
530  dsb(ishst);
531  __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
532  } else {
533  if (ctx->end - ctx->addr < granule)
534  return -EINVAL;
535 
536  kvm_clear_pte(ctx->ptep);
537  dsb(ishst);
538  __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
539  *unmapped += granule;
540  }
541 
542  dsb(ish);
543  isb();
544  mm_ops->put_page(ctx->ptep);
545 
546  if (childp)
547  mm_ops->put_page(childp);
548 
549  return 0;
550 }
551 
552 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
553 {
554  u64 unmapped = 0;
555  struct kvm_pgtable_walker walker = {
556  .cb = hyp_unmap_walker,
557  .arg = &unmapped,
558  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
559  };
560 
561  if (!pgt->mm_ops->page_count)
562  return 0;
563 
564  kvm_pgtable_walk(pgt, addr, size, &walker);
565  return unmapped;
566 }
567 
568 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
569  struct kvm_pgtable_mm_ops *mm_ops)
570 {
571  s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
572  ARM64_HW_PGTABLE_LEVELS(va_bits);
573 
574  if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
575  start_level > KVM_PGTABLE_LAST_LEVEL)
576  return -EINVAL;
577 
578  pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
579  if (!pgt->pgd)
580  return -ENOMEM;
581 
582  pgt->ia_bits = va_bits;
583  pgt->start_level = start_level;
584  pgt->mm_ops = mm_ops;
585  pgt->mmu = NULL;
586  pgt->force_pte_cb = NULL;
587 
588  return 0;
589 }
590 
591 static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
592  enum kvm_pgtable_walk_flags visit)
593 {
594  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
595 
596  if (!kvm_pte_valid(ctx->old))
597  return 0;
598 
599  mm_ops->put_page(ctx->ptep);
600 
601  if (kvm_pte_table(ctx->old, ctx->level))
602  mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
603 
604  return 0;
605 }
606 
607 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
608 {
609  struct kvm_pgtable_walker walker = {
610  .cb = hyp_free_walker,
611  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
612  };
613 
614  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
615  pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
616  pgt->pgd = NULL;
617 }
618 
619 struct stage2_map_data {
620  const u64 phys;
621  kvm_pte_t attr;
622  u8 owner_id;
623 
624  kvm_pte_t *anchor;
625  kvm_pte_t *childp;
626 
627  struct kvm_s2_mmu *mmu;
628  void *memcache;
629 
630  /* Force mappings to page granularity */
631  bool force_pte;
632 };
633 
634 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
635 {
636  u64 vtcr = VTCR_EL2_FLAGS;
637  s8 lvls;
638 
639  vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
640  vtcr |= VTCR_EL2_T0SZ(phys_shift);
641  /*
642  * Use a minimum 2 level page table to prevent splitting
643  * host PMD huge pages at stage2.
644  */
645  lvls = stage2_pgtable_levels(phys_shift);
646  if (lvls < 2)
647  lvls = 2;
648 
649  /*
650  * When LPA2 is enabled, the HW supports an extra level of translation
651  * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
652  * as an addition to SL0 to enable encoding this extra start level.
653  * However, since we always use concatenated pages for the first level
654  * lookup, we will never need this extra level and therefore do not need
655  * to touch SL2.
656  */
657  vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
658 
659 #ifdef CONFIG_ARM64_HW_AFDBM
660  /*
661  * Enable the Hardware Access Flag management, unconditionally
662  * on all CPUs. In systems that have asymmetric support for the feature
663  * this allows KVM to leverage hardware support on the subset of cores
664  * that implement the feature.
665  *
666  * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
667  * hardware) on implementations that do not advertise support for the
668  * feature. As such, setting HA unconditionally is safe, unless you
669  * happen to be running on a design that has unadvertised support for
670  * HAFDBS. Here be dragons.
671  */
672  if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
673  vtcr |= VTCR_EL2_HA;
674 #endif /* CONFIG_ARM64_HW_AFDBM */
675 
676  if (kvm_lpa2_is_enabled())
677  vtcr |= VTCR_EL2_DS;
678 
679  /* Set the vmid bits */
680  vtcr |= (get_vmid_bits(mmfr1) == 16) ?
681  VTCR_EL2_VS_16BIT :
682  VTCR_EL2_VS_8BIT;
683 
684  return vtcr;
685 }
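
/*
 * A worked sketch of the level computation above, assuming a 4K granule
 * and that stage2_pgtable_levels(phys_shift) expands to
 * DIV_ROUND_UP(phys_shift - 4 - PAGE_SHIFT, PAGE_SHIFT - 3) (the "- 4"
 * accounting for up to 16 concatenated first-level tables). Hypothetical
 * helper, not part of this file.
 */
static int demo_stage2_levels(int phys_shift)
{
	const int page_shift = 12;
	int lvls = (phys_shift - 4 - page_shift + (page_shift - 3) - 1) /
		   (page_shift - 3);

	return lvls < 2 ? 2 : lvls;	/* same clamp as kvm_get_vtcr() */
}
/* demo_stage2_levels(40) == 3 -> SL0 encodes start level 1 */
/* demo_stage2_levels(32) == 2 -> SL0 encodes start level 2 */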
686 
687 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
688 {
689  if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
690  return false;
691 
692  return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
693 }
694 
695 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
696  phys_addr_t addr, size_t size)
697 {
698  unsigned long pages, inval_pages;
699 
700  if (!system_supports_tlb_range()) {
701  kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
702  return;
703  }
704 
705  pages = size >> PAGE_SHIFT;
706  while (pages > 0) {
707  inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
708  kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
709 
710  addr += inval_pages << PAGE_SHIFT;
711  pages -= inval_pages;
712  }
713 }
714 
715 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
716 
717 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
718  kvm_pte_t *ptep)
719 {
720  bool device = prot & KVM_PGTABLE_PROT_DEVICE;
721  kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
722  KVM_S2_MEMATTR(pgt, NORMAL);
723  u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
724 
725  if (!(prot & KVM_PGTABLE_PROT_X))
726  attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
727  else if (device)
728  return -EINVAL;
729 
730  if (prot & KVM_PGTABLE_PROT_R)
731  attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
732 
733  if (prot & KVM_PGTABLE_PROT_W)
734  attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
735 
736  if (!kvm_lpa2_is_enabled())
737  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
738 
739  attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
740  attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
741  *ptep = attr;
742 
743  return 0;
744 }
745 
746 enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
747 {
748  enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
749 
750  if (!kvm_pte_valid(pte))
751  return prot;
752 
753  if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
754  prot |= KVM_PGTABLE_PROT_R;
755  if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
756  prot |= KVM_PGTABLE_PROT_W;
757  if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
758  prot |= KVM_PGTABLE_PROT_X;
759 
760  return prot;
761 }
762 
763 static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
764 {
765  if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
766  return true;
767 
768  return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
769 }
770 
771 static bool stage2_pte_is_counted(kvm_pte_t pte)
772 {
773  /*
774  * The refcount tracks valid entries as well as invalid entries if they
775  * encode ownership of a page to another entity than the page-table
776  * owner, whose id is 0.
777  */
778  return !!pte;
779 }
780 
781 static bool stage2_pte_is_locked(kvm_pte_t pte)
782 {
783  return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
784 }
785 
786 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
787 {
788  if (!kvm_pgtable_walk_shared(ctx)) {
789  WRITE_ONCE(*ctx->ptep, new);
790  return true;
791  }
792 
793  return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
794 }
795 
796 /**
797  * stage2_try_break_pte() - Invalidates a pte according to the
798  * 'break-before-make' requirements of the
799  * architecture.
800  *
801  * @ctx: context of the visited pte.
802  * @mmu: stage-2 mmu
803  *
804  * Returns: true if the pte was successfully broken.
805  *
806  * If the removed pte was valid, performs the necessary serialization and TLB
807  * invalidation for the old value. For counted ptes, drops the reference count
808  * on the containing table page.
809  */
810 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
811  struct kvm_s2_mmu *mmu)
812 {
813  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
814 
815  if (stage2_pte_is_locked(ctx->old)) {
816  /*
817  * Should never occur if this walker has exclusive access to the
818  * page tables.
819  */
820  WARN_ON(!kvm_pgtable_walk_shared(ctx));
821  return false;
822  }
823 
824  if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
825  return false;
826 
827  if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
828  /*
829  * Perform the appropriate TLB invalidation based on the
830  * evicted pte value (if any).
831  */
832  if (kvm_pte_table(ctx->old, ctx->level)) {
833  u64 size = kvm_granule_size(ctx->level);
834  u64 addr = ALIGN_DOWN(ctx->addr, size);
835 
836  kvm_tlb_flush_vmid_range(mmu, addr, size);
837  } else if (kvm_pte_valid(ctx->old)) {
838  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
839  ctx->addr, ctx->level);
840  }
841  }
842 
843  if (stage2_pte_is_counted(ctx->old))
844  mm_ops->put_page(ctx->ptep);
845 
846  return true;
847 }
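
/*
 * An illustrative pairing of the helpers above (a sketch mirroring what
 * stage2_map_walker_try_leaf() does further down, not a new API): break
 * the old PTE under the break-before-make protocol, then publish the
 * replacement.
 */
static int demo_bbm_replace(const struct kvm_pgtable_visit_ctx *ctx,
			    struct kvm_s2_mmu *mmu, kvm_pte_t new)
{
	if (!stage2_try_break_pte(ctx, mmu))
		return -EAGAIN;		/* lost a race; caller retries */

	/* Old translation is invalidated (and TLBs flushed) by this point */
	stage2_make_pte(ctx, new);
	return 0;
}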
848 
849 static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
850 {
851  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
852 
853  WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
854 
855  if (stage2_pte_is_counted(new))
856  mm_ops->get_page(ctx->ptep);
857 
858  smp_store_release(ctx->ptep, new);
859 }
860 
861 static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
862 {
863  /*
864  * If FEAT_TLBIRANGE is implemented, defer the individual
865  * TLB invalidations until the entire walk is finished, and
866  * then use the range-based TLBI instructions to do the
867  * invalidations. Condition deferred TLB invalidation on the
868  * system supporting FWB as the optimization is entirely
869  * pointless when the unmap walker needs to perform CMOs.
870  */
871  return system_supports_tlb_range() && stage2_has_fwb(pgt);
872 }
873 
874 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
875  struct kvm_s2_mmu *mmu,
876  struct kvm_pgtable_mm_ops *mm_ops)
877 {
878  struct kvm_pgtable *pgt = ctx->arg;
879 
880  /*
881  * Clear the existing PTE, and perform break-before-make if it was
882  * valid. Depending on the system support, defer the TLB maintenance
883  * for the same until the entire unmap walk is completed.
884  */
885  if (kvm_pte_valid(ctx->old)) {
886  kvm_clear_pte(ctx->ptep);
887 
888  if (!stage2_unmap_defer_tlb_flush(pgt))
889  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
890  ctx->addr, ctx->level);
891  }
892 
893  mm_ops->put_page(ctx->ptep);
894 }
895 
896 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
897 {
898  u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
899  return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
900 }
901 
902 static bool stage2_pte_executable(kvm_pte_t pte)
903 {
904  return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
905 }
906 
907 static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
908  const struct stage2_map_data *data)
909 {
910  u64 phys = data->phys;
911 
912  /*
913  * Stage-2 walks to update ownership data are communicated to the map
914  * walker using an invalid PA. Avoid offsetting an already invalid PA,
915  * which could overflow and make the address valid again.
916  */
917  if (!kvm_phys_is_valid(phys))
918  return phys;
919 
920  /*
921  * Otherwise, work out the correct PA based on how far the walk has
922  * gotten.
923  */
924  return phys + (ctx->addr - ctx->start);
925 }
926 
927 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
928  struct stage2_map_data *data)
929 {
930  u64 phys = stage2_map_walker_phys_addr(ctx, data);
931 
932  if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
933  return false;
934 
935  return kvm_block_mapping_supported(ctx, phys);
936 }
937 
938 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
939  struct stage2_map_data *data)
940 {
941  kvm_pte_t new;
942  u64 phys = stage2_map_walker_phys_addr(ctx, data);
943  u64 granule = kvm_granule_size(ctx->level);
944  struct kvm_pgtable *pgt = data->mmu->pgt;
945  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
946 
947  if (!stage2_leaf_mapping_allowed(ctx, data))
948  return -E2BIG;
949 
950  if (kvm_phys_is_valid(phys))
951  new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
952  else
953  new = kvm_init_invalid_leaf_owner(data->owner_id);
954 
955  /*
956  * Skip updating the PTE if we are trying to recreate the exact
957  * same mapping or only change the access permissions. Instead,
958  * the vCPU will exit one more time from guest if still needed
959  * and then go through the path of relaxing permissions.
960  */
961  if (!stage2_pte_needs_update(ctx->old, new))
962  return -EAGAIN;
963 
964  if (!stage2_try_break_pte(ctx, data->mmu))
965  return -EAGAIN;
966 
967  /* Perform CMOs before installation of the guest stage-2 PTE */
968  if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
969  stage2_pte_cacheable(pgt, new))
970  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
971  granule);
972 
973  if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
974  stage2_pte_executable(new))
975  mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
976 
977  stage2_make_pte(ctx, new);
978 
979  return 0;
980 }
981 
982 static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
983  struct stage2_map_data *data)
984 {
985  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
986  kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
987  int ret;
988 
989  if (!stage2_leaf_mapping_allowed(ctx, data))
990  return 0;
991 
992  ret = stage2_map_walker_try_leaf(ctx, data);
993  if (ret)
994  return ret;
995 
996  mm_ops->free_unlinked_table(childp, ctx->level);
997  return 0;
998 }
999 
1000 static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
1001  struct stage2_map_data *data)
1002 {
1003  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1004  kvm_pte_t *childp, new;
1005  int ret;
1006 
1007  ret = stage2_map_walker_try_leaf(ctx, data);
1008  if (ret != -E2BIG)
1009  return ret;
1010 
1011  if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
1012  return -EINVAL;
1013 
1014  if (!data->memcache)
1015  return -ENOMEM;
1016 
1017  childp = mm_ops->zalloc_page(data->memcache);
1018  if (!childp)
1019  return -ENOMEM;
1020 
1021  if (!stage2_try_break_pte(ctx, data->mmu)) {
1022  mm_ops->put_page(childp);
1023  return -EAGAIN;
1024  }
1025 
1026  /*
1027  * If we've run into an existing block mapping then replace it with
1028  * a table. Accesses beyond 'end' that fall within the new table
1029  * will be mapped lazily.
1030  */
1031  new = kvm_init_table_pte(childp, mm_ops);
1032  stage2_make_pte(ctx, new);
1033 
1034  return 0;
1035 }
1036 
1037 /*
1038  * The TABLE_PRE callback runs for table entries on the way down, looking
1039  * for table entries which we could conceivably replace with a block entry
1040  * for this mapping. If it finds one it replaces the entry and calls
1041  * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
1042  *
1043  * Otherwise, the LEAF callback performs the mapping at the existing leaves
1044  * instead.
1045  */
1046 static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
1047  enum kvm_pgtable_walk_flags visit)
1048 {
1049  struct stage2_map_data *data = ctx->arg;
1050 
1051  switch (visit) {
1052  case KVM_PGTABLE_WALK_TABLE_PRE:
1053  return stage2_map_walk_table_pre(ctx, data);
1054  case KVM_PGTABLE_WALK_LEAF:
1055  return stage2_map_walk_leaf(ctx, data);
1056  default:
1057  return -EINVAL;
1058  }
1059 }
1060 
1061 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
1062  u64 phys, enum kvm_pgtable_prot prot,
1063  void *mc, enum kvm_pgtable_walk_flags flags)
1064 {
1065  int ret;
1066  struct stage2_map_data map_data = {
1067  .phys = ALIGN_DOWN(phys, PAGE_SIZE),
1068  .mmu = pgt->mmu,
1069  .memcache = mc,
1070  .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
1071  };
1072  struct kvm_pgtable_walker walker = {
1073  .cb = stage2_map_walker,
1074  .flags = flags |
1075  KVM_PGTABLE_WALK_TABLE_PRE |
1076  KVM_PGTABLE_WALK_LEAF,
1077  .arg = &map_data,
1078  };
1079 
1080  if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
1081  return -EINVAL;
1082 
1083  ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1084  if (ret)
1085  return ret;
1086 
1087  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1088  dsb(ishst);
1089  return ret;
1090 }
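
/*
 * A usage sketch (hypothetical caller, not part of this file): map one
 * page RWX into a guest, with mc a pre-topped-up memory cache supplying
 * table pages and no extra walk flags.
 */
static int demo_stage2_map_page(struct kvm_pgtable *pgt, u64 ipa, u64 pa,
				void *mc)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R |
				     KVM_PGTABLE_PROT_W |
				     KVM_PGTABLE_PROT_X;

	return kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa, prot, mc, 0);
}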
1091 
1092 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
1093  void *mc, u8 owner_id)
1094 {
1095  int ret;
1096  struct stage2_map_data map_data = {
1097  .phys = KVM_PHYS_INVALID,
1098  .mmu = pgt->mmu,
1099  .memcache = mc,
1100  .owner_id = owner_id,
1101  .force_pte = true,
1102  };
1103  struct kvm_pgtable_walker walker = {
1104  .cb = stage2_map_walker,
1105  .flags = KVM_PGTABLE_WALK_TABLE_PRE |
1106  KVM_PGTABLE_WALK_LEAF,
1107  .arg = &map_data,
1108  };
1109 
1110  if (owner_id > KVM_MAX_OWNER_ID)
1111  return -EINVAL;
1112 
1113  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1114  return ret;
1115 }
1116 
1117 static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
1118  enum kvm_pgtable_walk_flags visit)
1119 {
1120  struct kvm_pgtable *pgt = ctx->arg;
1121  struct kvm_s2_mmu *mmu = pgt->mmu;
1122  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1123  kvm_pte_t *childp = NULL;
1124  bool need_flush = false;
1125 
1126  if (!kvm_pte_valid(ctx->old)) {
1127  if (stage2_pte_is_counted(ctx->old)) {
1128  kvm_clear_pte(ctx->ptep);
1129  mm_ops->put_page(ctx->ptep);
1130  }
1131  return 0;
1132  }
1133 
1134  if (kvm_pte_table(ctx->old, ctx->level)) {
1135  childp = kvm_pte_follow(ctx->old, mm_ops);
1136 
1137  if (mm_ops->page_count(childp) != 1)
1138  return 0;
1139  } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1140  need_flush = !stage2_has_fwb(pgt);
1141  }
1142 
1143  /*
1144  * This is similar to the map() path in that we unmap the entire
1145  * block entry and rely on the remaining portions being faulted
1146  * back lazily.
1147  */
1148  stage2_unmap_put_pte(ctx, mmu, mm_ops);
1149 
1150  if (need_flush && mm_ops->dcache_clean_inval_poc)
1151  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1152  kvm_granule_size(ctx->level));
1153 
1154  if (childp)
1155  mm_ops->put_page(childp);
1156 
1157  return 0;
1158 }
1159 
1160 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
1161 {
1162  int ret;
1163  struct kvm_pgtable_walker walker = {
1164  .cb = stage2_unmap_walker,
1165  .arg = pgt,
1166  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
1167  };
1168 
1169  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1170  if (stage2_unmap_defer_tlb_flush(pgt))
1171  /* Perform the deferred TLB invalidations */
1172  kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
1173 
1174  return ret;
1175 }
1176 
1177 struct stage2_attr_data {
1178  kvm_pte_t attr_set;
1179  kvm_pte_t attr_clr;
1180  kvm_pte_t pte;
1181  s8 level;
1182 };
1183 
1184 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
1185  enum kvm_pgtable_walk_flags visit)
1186 {
1187  kvm_pte_t pte = ctx->old;
1188  struct stage2_attr_data *data = ctx->arg;
1189  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1190 
1191  if (!kvm_pte_valid(ctx->old))
1192  return -EAGAIN;
1193 
1194  data->level = ctx->level;
1195  data->pte = pte;
1196  pte &= ~data->attr_clr;
1197  pte |= data->attr_set;
1198 
1199  /*
1200  * We may race with the CPU trying to set the access flag here,
1201  * but worst-case the access flag update gets lost and will be
1202  * set on the next access instead.
1203  */
1204  if (data->pte != pte) {
1205  /*
1206  * Invalidate instruction cache before updating the guest
1207  * stage-2 PTE if we are going to add executable permission.
1208  */
1209  if (mm_ops->icache_inval_pou &&
1210  stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1211  mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
1212  kvm_granule_size(ctx->level));
1213 
1214  if (!stage2_try_set_pte(ctx, pte))
1215  return -EAGAIN;
1216  }
1217 
1218  return 0;
1219 }
1220 
1221 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
1222  u64 size, kvm_pte_t attr_set,
1223  kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
1224  s8 *level, enum kvm_pgtable_walk_flags flags)
1225 {
1226  int ret;
1227  kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
1228  struct stage2_attr_data data = {
1229  .attr_set = attr_set & attr_mask,
1230  .attr_clr = attr_clr & attr_mask,
1231  };
1232  struct kvm_pgtable_walker walker = {
1233  .cb = stage2_attr_walker,
1234  .arg = &data,
1235  .flags = flags | KVM_PGTABLE_WALK_LEAF,
1236  };
1237 
1238  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1239  if (ret)
1240  return ret;
1241 
1242  if (orig_pte)
1243  *orig_pte = data.pte;
1244 
1245  if (level)
1246  *level = data.level;
1247  return 0;
1248 }
1249 
1250 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
1251 {
1252  return stage2_update_leaf_attrs(pgt, addr, size, 0,
1253  KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
1254  NULL, NULL, 0);
1255 }
1256 
1257 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
1258 {
1259  kvm_pte_t pte = 0;
1260  int ret;
1261 
1262  ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1263  &pte, NULL,
1264  KVM_PGTABLE_WALK_HANDLE_FAULT |
1265  KVM_PGTABLE_WALK_SHARED);
1266  if (!ret)
1267  dsb(ishst);
1268 
1269  return pte;
1270 }
1271 
1272 struct stage2_age_data {
1273  bool mkold;
1274  bool young;
1275 };
1276 
1277 static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
1278  enum kvm_pgtable_walk_flags visit)
1279 {
1280  kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
1281  struct stage2_age_data *data = ctx->arg;
1282 
1283  if (!kvm_pte_valid(ctx->old) || new == ctx->old)
1284  return 0;
1285 
1286  data->young = true;
1287 
1288  /*
1289  * stage2_age_walker() is always called while holding the MMU lock for
1290  * write, so this will always succeed. Nonetheless, this deliberately
1291  * follows the race detection pattern of the other stage-2 walkers in
1292  * case the locking mechanics of the MMU notifiers is ever changed.
1293  */
1294  if (data->mkold && !stage2_try_set_pte(ctx, new))
1295  return -EAGAIN;
1296 
1297  /*
1298  * "But where's the TLBI?!", you scream.
1299  * "Over in the core code", I sigh.
1300  *
1301  * See the '->clear_flush_young()' callback on the KVM mmu notifier.
1302  */
1303  return 0;
1304 }
1305 
1306 bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
1307  u64 size, bool mkold)
1308 {
1309  struct stage2_age_data data = {
1310  .mkold = mkold,
1311  };
1312  struct kvm_pgtable_walker walker = {
1313  .cb = stage2_age_walker,
1314  .arg = &data,
1315  .flags = KVM_PGTABLE_WALK_LEAF,
1316  };
1317 
1318  WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1319  return data.young;
1320 }
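
/*
 * A sketch of the aging flow (hypothetical wrapper, not part of this
 * file): test and clear the access flag for a single page, as an MMU
 * notifier's clear_flush_young() path might.
 */
static bool demo_page_is_young(struct kvm_pgtable *pgt, u64 ipa)
{
	/* mkold == true: clear AF so the next access marks the page young */
	return kvm_pgtable_stage2_test_clear_young(pgt, ipa, PAGE_SIZE, true);
}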
1321 
1322 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
1323  enum kvm_pgtable_prot prot)
1324 {
1325  int ret;
1326  s8 level;
1327  kvm_pte_t set = 0, clr = 0;
1328 
1329  if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
1330  return -EINVAL;
1331 
1332  if (prot & KVM_PGTABLE_PROT_R)
1333  set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
1334 
1335  if (prot & KVM_PGTABLE_PROT_W)
1336  set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
1337 
1338  if (prot & KVM_PGTABLE_PROT_X)
1339  clr = KVM_PTE_LEAF_ATTR_HI_S2_XN;
1340 
1341  ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1342  KVM_PGTABLE_WALK_HANDLE_FAULT |
1343  KVM_PGTABLE_WALK_SHARED);
1344  if (!ret || ret == -EAGAIN)
1345  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
1346  return ret;
1347 }
1348 
1349 static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
1350  enum kvm_pgtable_walk_flags visit)
1351 {
1352  struct kvm_pgtable *pgt = ctx->arg;
1353  struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1354 
1355  if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1356  return 0;
1357 
1358  if (mm_ops->dcache_clean_inval_poc)
1359  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1360  kvm_granule_size(ctx->level));
1361  return 0;
1362 }
1363 
1364 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
1365 {
1366  struct kvm_pgtable_walker walker = {
1367  .cb = stage2_flush_walker,
1368  .flags = KVM_PGTABLE_WALK_LEAF,
1369  .arg = pgt,
1370  };
1371 
1372  if (stage2_has_fwb(pgt))
1373  return 0;
1374 
1375  return kvm_pgtable_walk(pgt, addr, size, &walker);
1376 }
1377 
1378 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
1379  u64 phys, s8 level,
1380  enum kvm_pgtable_prot prot,
1381  void *mc, bool force_pte)
1382 {
1383  struct stage2_map_data map_data = {
1384  .phys = phys,
1385  .mmu = pgt->mmu,
1386  .memcache = mc,
1387  .force_pte = force_pte,
1388  };
1389  struct kvm_pgtable_walker walker = {
1390  .cb = stage2_map_walker,
1391  .flags = KVM_PGTABLE_WALK_LEAF |
1392  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
1393  KVM_PGTABLE_WALK_SKIP_CMO,
1394  .arg = &map_data,
1395  };
1396  /*
1397  * The input address (.addr) is irrelevant for walking an
1398  * unlinked table. Construct an ambiguous IA range to map
1399  * kvm_granule_size(level) worth of memory.
1400  */
1401  struct kvm_pgtable_walk_data data = {
1402  .walker = &walker,
1403  .addr = 0,
1404  .end = kvm_granule_size(level),
1405  };
1406  struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1407  kvm_pte_t *pgtable;
1408  int ret;
1409 
1410  if (!IS_ALIGNED(phys, kvm_granule_size(level)))
1411  return ERR_PTR(-EINVAL);
1412 
1413  ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1414  if (ret)
1415  return ERR_PTR(ret);
1416 
1417  pgtable = mm_ops->zalloc_page(mc);
1418  if (!pgtable)
1419  return ERR_PTR(-ENOMEM);
1420 
1421  ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
1422  level + 1);
1423  if (ret) {
1424  kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
1425  return ERR_PTR(ret);
1426  }
1427 
1428  return pgtable;
1429 }
1430 
1431 /*
1432  * Get the number of page-tables needed to replace a block with a
1433  * fully populated tree up to the PTE entries. Note that @level is
1434  * interpreted as in "level @level entry".
1435  */
1436 static int stage2_block_get_nr_page_tables(s8 level)
1437 {
1438  switch (level) {
1439  case 1:
1440  return PTRS_PER_PTE + 1;
1441  case 2:
1442  return 1;
1443  case 3:
1444  return 0;
1445  default:
1446  WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
1447  level > KVM_PGTABLE_LAST_LEVEL);
1448  return -EINVAL;
1449  };
1450 }
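
/*
 * A worked check of the arithmetic above, assuming a 4K granule
 * (PTRS_PER_PTE == 512): fully splitting a 1GiB level-1 block takes the
 * replacement level-2 table plus one level-3 table per level-2 entry,
 * i.e. 1 + 512 == PTRS_PER_PTE + 1 pages; a 2MiB level-2 block takes a
 * single level-3 table; a level-3 entry is already a page. Hypothetical
 * helper, not part of this file.
 */
static int demo_nr_tables_for_level1_block(void)
{
	const int ptrs_per_pte = 512;	/* 4K granule */

	return 1 /* level-2 table */ + ptrs_per_pte /* level-3 tables */;
}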
1451 
1452 static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
1453  enum kvm_pgtable_walk_flags visit)
1454 {
1455  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1456  struct kvm_mmu_memory_cache *mc = ctx->arg;
1457  struct kvm_s2_mmu *mmu;
1458  kvm_pte_t pte = ctx->old, new, *childp;
1459  enum kvm_pgtable_prot prot;
1460  s8 level = ctx->level;
1461  bool force_pte;
1462  int nr_pages;
1463  u64 phys;
1464 
1465  /* No huge-pages exist at the last level */
1466  if (level == KVM_PGTABLE_LAST_LEVEL)
1467  return 0;
1468 
1469  /* We only split valid block mappings */
1470  if (!kvm_pte_valid(pte))
1471  return 0;
1472 
1473  nr_pages = stage2_block_get_nr_page_tables(level);
1474  if (nr_pages < 0)
1475  return nr_pages;
1476 
1477  if (mc->nobjs >= nr_pages) {
1478  /* Build a tree mapped down to the PTE granularity. */
1479  force_pte = true;
1480  } else {
1481  /*
1482  * Don't force PTEs, so create_unlinked() below does
1483  * not populate the tree up to the PTE level. The
1484  * consequence is that the call will require a single
1485  * page of level 2 entries at level 1, or a single
1486  * page of PTEs at level 2. If we are at level 1, the
1487  * PTEs will be created recursively.
1488  */
1489  force_pte = false;
1490  nr_pages = 1;
1491  }
1492 
1493  if (mc->nobjs < nr_pages)
1494  return -ENOMEM;
1495 
1496  mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
1497  phys = kvm_pte_to_phys(pte);
1498  prot = kvm_pgtable_stage2_pte_prot(pte);
1499 
1500  childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
1501  level, prot, mc, force_pte);
1502  if (IS_ERR(childp))
1503  return PTR_ERR(childp);
1504 
1505  if (!stage2_try_break_pte(ctx, mmu)) {
1506  kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
1507  return -EAGAIN;
1508  }
1509 
1510  /*
1511  * Note, the contents of the page table are guaranteed to be made
1512  * visible before the new PTE is assigned because stage2_make_pte()
1513  * writes the PTE using smp_store_release().
1514  */
1515  new = kvm_init_table_pte(childp, mm_ops);
1516  stage2_make_pte(ctx, new);
1517  dsb(ishst);
1518  return 0;
1519 }
1520 
1521 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
1522  struct kvm_mmu_memory_cache *mc)
1523 {
1524  struct kvm_pgtable_walker walker = {
1525  .cb = stage2_split_walker,
1526  .flags = KVM_PGTABLE_WALK_LEAF,
1527  .arg = mc,
1528  };
1529 
1530  return kvm_pgtable_walk(pgt, addr, size, &walker);
1531 }
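
/*
 * A usage sketch (hypothetical caller, modelled on eager page splitting
 * for dirty logging; not part of this file): split huge mappings over a
 * range using the MMU's dedicated split cache, which must have been
 * topped up beforehand (see stage2_block_get_nr_page_tables() for the
 * worst-case cost per block).
 */
static int demo_split_for_dirty_logging(struct kvm_pgtable *pgt,
					struct kvm_s2_mmu *mmu,
					u64 addr, u64 size)
{
	return kvm_pgtable_stage2_split(pgt, addr, size,
					&mmu->split_page_cache);
}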
1532 
1533 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
1534  struct kvm_pgtable_mm_ops *mm_ops,
1535  enum kvm_pgtable_stage2_flags flags,
1536  kvm_pgtable_force_pte_cb_t force_pte_cb)
1537 {
1538  size_t pgd_sz;
1539  u64 vtcr = mmu->vtcr;
1540  u32 ia_bits = VTCR_EL2_IPA(vtcr);
1541  u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1542  s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1543 
1544  pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1545  pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1546  if (!pgt->pgd)
1547  return -ENOMEM;
1548 
1549  pgt->ia_bits = ia_bits;
1550  pgt->start_level = start_level;
1551  pgt->mm_ops = mm_ops;
1552  pgt->mmu = mmu;
1553  pgt->flags = flags;
1554  pgt->force_pte_cb = force_pte_cb;
1555 
1556  /* Ensure zeroed PGD pages are visible to the hardware walker */
1557  dsb(ishst);
1558  return 0;
1559 }
1560 
1561 size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
1562 {
1563  u32 ia_bits = VTCR_EL2_IPA(vtcr);
1564  u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1565  s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1566 
1567  return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1568 }
1569 
1570 static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
1571  enum kvm_pgtable_walk_flags visit)
1572 {
1573  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1574 
1575  if (!stage2_pte_is_counted(ctx->old))
1576  return 0;
1577 
1578  mm_ops->put_page(ctx->ptep);
1579 
1580  if (kvm_pte_table(ctx->old, ctx->level))
1581  mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
1582 
1583  return 0;
1584 }
1585 
1586 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1587 {
1588  size_t pgd_sz;
1589  struct kvm_pgtable_walker walker = {
1590  .cb = stage2_free_walker,
1591  .flags = KVM_PGTABLE_WALK_LEAF |
1592  KVM_PGTABLE_WALK_TABLE_POST,
1593  };
1594 
1595  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1596  pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1597  pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1598  pgt->pgd = NULL;
1599 }
1600 
1601 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
1602 {
1603  kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
1604  struct kvm_pgtable_walker walker = {
1605  .cb = stage2_free_walker,
1606  .flags = KVM_PGTABLE_WALK_LEAF |
1607  KVM_PGTABLE_WALK_TABLE_POST,
1608  };
1609  struct kvm_pgtable_walk_data data = {
1610  .walker = &walker,
1611 
1612  /*
1613  * At this point the IPA really doesn't matter, as the page
1614  * table being traversed has already been removed from the stage
1615  * 2. Set an appropriate range to cover the entire page table.
1616  */
1617  .addr = 0,
1618  .end = kvm_granule_size(level),
1619  };
1620 
1621  WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1622 
1623  WARN_ON(mm_ops->page_count(pgtable) != 1);
1624  mm_ops->put_page(pgtable);
1625 }