#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1
/* Marks a pte for which a 'break-before-make' sequence is in progress. */
#define KVM_INVALID_PTE_LOCKED		BIT(10)
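/*
 * Illustrative sketch (not part of the original source): the masks above
 * are meant to be packed with FIELD_PREP() from <linux/bitfield.h>, as the
 * prot-to-attribute helpers later in this file do. The helper name below
 * is hypothetical and only demonstrates that usage.
 */
static inline kvm_pte_t example_s1_leaf_attr(u32 mtype, bool writable)
{
	kvm_pte_t attr;

	/* Memory type index into MAIR_EL2, e.g. MT_NORMAL. */
	attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	/* Access permissions: RW or RO, accounting for hVHE. */
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, writable ?
			   KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
			   KVM_PTE_LEAF_ATTR_LO_S1_AP_RO);
	/* Inner shareable, access flag set so no AF faults are taken. */
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, KVM_PTE_LEAF_ATTR_LO_S1_SH_IS);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;

	return attr;
}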
	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);

	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);

	u64 parange_max = kvm_get_parange_max();
	u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);

	return phys < BIT(shift);

	u64 granule = kvm_granule_size(ctx->level);

	if (!kvm_level_supports_block_mapping(ctx->level))
		return false;

	if (granule > (ctx->end - ctx->addr))
		return false;

	return IS_ALIGNED(ctx->addr, granule);
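/*
 * Worked example (not in the original source): with 4KiB pages a level-2
 * block covers kvm_granule_size(2) = 2MiB. A block mapping is only used
 * when the remaining range [ctx->addr, ctx->end) spans at least 2MiB and
 * ctx->addr (and, for a valid phys, the physical address) is 2MiB aligned;
 * otherwise the walker descends and maps at a finer level.
 */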
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;

	u64 shift = kvm_granule_shift(pgt->start_level - 1);
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
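/*
 * Worked example (not in the original source): with 4KiB pages,
 * PAGE_SHIFT is 12, so every level resolves PAGE_SHIFT - 3 = 9 bits of
 * the address and the per-level mask is BIT(9) - 1 = 0x1ff. For a
 * level-3 lookup of address 0x40201000, kvm_granule_shift(3) is 12 and
 * the table index is (0x40201000 >> 12) & 0x1ff = 0x1.
 */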
	struct kvm_pgtable pgt = {
		.start_level	= start_level,

	if (level == KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

	WRITE_ONCE(*ptep, 0);

	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= KVM_PTE_VALID;

	kvm_pte_t pte = kvm_phys_to_pte(pa);

	pte |= KVM_PTE_VALID;
					 const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable_walker *walker = data->walker;

	WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
	return walker->cb(ctx, visit);

	return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);

				      struct kvm_pgtable_mm_ops *mm_ops,
				      kvm_pteref_t pteref, s8 level)

	enum kvm_pgtable_walk_flags flags = data->walker->flags;
	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
	struct kvm_pgtable_visit_ctx ctx = {
		.old	= READ_ONCE(*ptep),
		.start	= data->start,

	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {

	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {

		ctx.old = READ_ONCE(*ptep);

		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);

	if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)

	if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
			 level > KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

		kvm_pteref_t pteref = &pgtable[idx];

	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

		kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
		     struct kvm_pgtable_walker *walker)

		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),

	r = kvm_pgtable_walk_begin(walker);

	kvm_pgtable_walk_end(walker);
		       enum kvm_pgtable_walk_flags visit)

	data->pte   = ctx->old;
	data->level = ctx->level;

			 kvm_pte_t *ptep, s8 *level)

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF,
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;

	if (!(prot & KVM_PGTABLE_PROT_R))

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)

	if (system_supports_bti_kernel())

	if (!kvm_lpa2_is_enabled())

	if (!kvm_pte_valid(pte))

		prot |= KVM_PGTABLE_PROT_X;

		prot |= KVM_PGTABLE_PROT_R;

		prot |= KVM_PGTABLE_PROT_RW;
	u64 phys = data->phys + (ctx->addr - ctx->start);

	if (!kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);

			  enum kvm_pgtable_walk_flags visit)

	kvm_pte_t *childp, new;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);

	mm_ops->get_page(ctx->ptep);
	smp_store_release(ctx->ptep, new);
			enum kvm_pgtable_prot prot)

		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF,
			     enum kvm_pgtable_walk_flags visit)

	kvm_pte_t *childp = NULL;
	u64 granule = kvm_granule_size(ctx->level);
	u64 *unmapped = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))

		if (mm_ops->page_count(childp) != 1)

		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);

		if (ctx->end - ctx->addr < granule)

		__tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
		*unmapped += granule;

	mm_ops->put_page(ctx->ptep);

		mm_ops->put_page(childp);

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,

	if (!pgt->mm_ops->page_count)
			 struct kvm_pgtable_mm_ops *mm_ops)

	s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
			 ARM64_HW_PGTABLE_LEVELS(va_bits);

	if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
	    start_level > KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);

	pgt->ia_bits		= va_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;

	pgt->force_pte_cb	= NULL;
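/*
 * Usage sketch (not part of the original source): a hypervisor mapping is
 * created by initialising a table, mapping a range, and eventually tearing
 * it down. The mm_ops instance and the addresses below are placeholders
 * the caller is assumed to provide; the wrapper name is made up.
 */
static int example_hyp_map_range(struct kvm_pgtable *pgt, u32 va_bits,
				 struct kvm_pgtable_mm_ops *mm_ops,
				 u64 va, u64 pa, u64 size)
{
	int ret;

	ret = kvm_pgtable_hyp_init(pgt, va_bits, mm_ops);
	if (ret)
		return ret;

	/* Read/write, non-executable mapping of [pa, pa + size) at va. */
	ret = kvm_pgtable_hyp_map(pgt, va, size, pa,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
	if (ret)
		kvm_pgtable_hyp_destroy(pgt);

	return ret;
}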
			   enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))

	mm_ops->put_page(ctx->ptep);

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,

	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	u64 vtcr = VTCR_EL2_FLAGS;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);

	lvls = stage2_pgtable_levels(phys_shift);

	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

#ifdef CONFIG_ARM64_HW_AFDBM
	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		vtcr |= VTCR_EL2_HA;

	if (kvm_lpa2_is_enabled())
		vtcr |= VTCR_EL2_DS;

	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
		VTCR_EL2_VS_16BIT : VTCR_EL2_VS_8BIT;

	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
			      phys_addr_t addr, size_t size)

	unsigned long pages, inval_pages;

	if (!system_supports_tlb_range()) {

	pages = size >> PAGE_SHIFT;

		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);

		addr += inval_pages << PAGE_SHIFT;
		pages -= inval_pages;

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;

	if (!(prot & KVM_PGTABLE_PROT_X))

	if (prot & KVM_PGTABLE_PROT_R)

	if (prot & KVM_PGTABLE_PROT_W)

	if (!kvm_lpa2_is_enabled())

	if (!kvm_pte_valid(pte))

		prot |= KVM_PGTABLE_PROT_R;

		prot |= KVM_PGTABLE_PROT_W;

		prot |= KVM_PGTABLE_PROT_X;
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))

	if (!kvm_pgtable_walk_shared(ctx)) {
		WRITE_ONCE(*ctx->ptep, new);

	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
				 struct kvm_s2_mmu *mmu)

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

		WARN_ON(!kvm_pgtable_walk_shared(ctx));

		u64 size = kvm_granule_size(ctx->level);
		u64 addr = ALIGN_DOWN(ctx->addr, size);

	} else if (kvm_pte_valid(ctx->old)) {

				     ctx->addr, ctx->level);

	mm_ops->put_page(ctx->ptep);

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

		mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);
				 struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops)

	struct kvm_pgtable *pgt = ctx->arg;

	if (kvm_pte_valid(ctx->old)) {

				     ctx->addr, ctx->level);

	mm_ops->put_page(ctx->ptep);

	u64 phys = data->phys;

	return phys + (ctx->addr - ctx->start);

	if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)

	u64 granule = kvm_granule_size(ctx->level);
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	mm_ops->free_unlinked_table(childp, ctx->level);

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp, new;

	if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))

	childp = mm_ops->zalloc_page(data->memcache);

	mm_ops->put_page(childp);

			     enum kvm_pgtable_walk_flags visit)

	case KVM_PGTABLE_WALK_TABLE_PRE:

	case KVM_PGTABLE_WALK_LEAF:
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags)

		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.force_pte	= pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),

	struct kvm_pgtable_walker walker = {
				  KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;
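/*
 * Usage sketch (not part of the original source): mapping a single page
 * into a guest's stage-2 table. The memcache argument is assumed to have
 * been topped up by the caller; the protection bits and the wrapper name
 * are illustrative.
 */
static int example_stage2_map_one_page(struct kvm_pgtable *pgt, u64 ipa,
				       u64 pa, void *memcache)
{
	return kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa,
				      KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				      memcache, 0);
}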
				 void *mc, u8 owner_id)

		.phys		= KVM_PHYS_INVALID,

	struct kvm_pgtable_walker walker = {
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
			       enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(ctx->old)) {

		mm_ops->put_page(ctx->ptep);

		if (mm_ops->page_count(childp) != 1)

	if (need_flush && mm_ops->dcache_clean_inval_poc)

					       kvm_granule_size(ctx->level));

		mm_ops->put_page(childp);

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
			      enum kvm_pgtable_walk_flags visit)

	kvm_pte_t pte = ctx->old;

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))

	data->level = ctx->level;

	if (data->pte != pte) {

		if (mm_ops->icache_inval_pou &&

					  kvm_granule_size(ctx->level));

				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    s8 *level, enum kvm_pgtable_walk_flags flags)

	struct kvm_pgtable_walker walker = {
		.flags		= flags | KVM_PGTABLE_WALK_LEAF,

		*orig_pte = data.pte;

		*level = data.level;
					KVM_PGTABLE_WALK_HANDLE_FAULT |
					KVM_PGTABLE_WALK_SHARED);

			      enum kvm_pgtable_walk_flags visit)

	if (!kvm_pte_valid(ctx->old) || new == ctx->old)

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF,

			enum kvm_pgtable_prot prot)

	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PGTABLE_PROT_R)

	if (prot & KVM_PGTABLE_PROT_W)

	if (prot & KVM_PGTABLE_PROT_X)

				       KVM_PGTABLE_WALK_HANDLE_FAULT |
				       KVM_PGTABLE_WALK_SHARED);
	if (!ret || ret == -EAGAIN)
			       enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

	if (mm_ops->dcache_clean_inval_poc)

					       kvm_granule_size(ctx->level));

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF,
						      enum kvm_pgtable_prot prot,
						      void *mc, bool force_pte)

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
			  KVM_PGTABLE_WALK_SKIP_CMO,

		.end	= kvm_granule_size(level),

	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

	if (!IS_ALIGNED(phys, kvm_granule_size(level)))
		return ERR_PTR(-EINVAL);

		return ERR_PTR(ret);

	pgtable = mm_ops->zalloc_page(mc);

		return ERR_PTR(-ENOMEM);

		return ERR_PTR(ret);
		return PTRS_PER_PTE + 1;

		WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
			     level > KVM_PGTABLE_LAST_LEVEL);
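/*
 * Worked example (not in the original source): with 4KiB pages, splitting
 * a level-1 (1GiB) block down to page granularity requires one level-2
 * table plus PTRS_PER_PTE (512) level-3 tables, hence PTRS_PER_PTE + 1
 * pages; a level-2 (2MiB) block needs a single level-3 table, and a
 * level-3 entry is already a page, needing none.
 */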
				enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	struct kvm_mmu_memory_cache *mc = ctx->arg;
	struct kvm_s2_mmu *mmu;
	kvm_pte_t pte = ctx->old, new, *childp;
	enum kvm_pgtable_prot prot;
	s8 level = ctx->level;

	if (level == KVM_PGTABLE_LAST_LEVEL)

	if (!kvm_pte_valid(pte))

	if (mc->nobjs >= nr_pages) {

	if (mc->nobjs < nr_pages)

	mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
	phys = kvm_pte_to_phys(pte);

						    level, prot, mc, force_pte);

		return PTR_ERR(childp);
			     struct kvm_mmu_memory_cache *mc)

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF,

			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb)

	u64 vtcr = mmu->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;

	pgt->force_pte_cb	= force_pte_cb;

	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
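/*
 * Worked example (not in the original source): for a 40-bit IPA space
 * with 4KiB pages the stage-2 walk starts at level 1, and a single
 * level-1 table only covers 39 bits, so kvm_pgd_pages() reports two
 * concatenated tables and kvm_pgtable_stage2_pgd_size() returns
 * 2 * PAGE_SIZE.
 */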
			      enum kvm_pgtable_walk_flags visit)

	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	mm_ops->put_page(ctx->ptep);

	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,

	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);

	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
	struct kvm_pgtable_walker walker = {
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,

		.end	= kvm_granule_size(level),

	WARN_ON(mm_ops->page_count(pgtable) != 1);
	mm_ops->put_page(pgtable);