#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
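/*
 * A note on the flags above: KVM_PGTABLE_S2_IDMAP makes the host stage-2
 * an identity map (host IPA == PA), so no separate translation layout has
 * to be tracked, and KVM_PGTABLE_S2_NOFWB asks the pgtable code not to
 * rely on FEAT_S2FWB attribute forcing, leaving the host's own memory
 * attributes in effect.
 */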
#define current_vm (*this_cpu_ptr(&__current_vm))
WARN_ON(size != (PAGE_SIZE << get_order(size)));

unsigned long nr_pages, pfn;

nr_pages = host_s2_pgtable_pages();

u32 parange, phys_shift;

phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
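/*
 * In prepare_host_vtcr(), the supported PA range (parange) is read from
 * the ID_AA64MMFR0_EL1 snapshot and converted to a physical address
 * shift, which then sizes the host VTCR via kvm_get_vtcr(). Because the
 * host stage-2 is id-mapped, parange directly bounds the IPA space.
 */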
atomic64_set(&mmu->vmid.id, 0);

enum kvm_pgtable_prot prot)
WARN_ON(size != (PAGE_SIZE << get_order(size)));

u8 order = get_order(size);

for (i = 0; i < (1 << order); i++)

memset(addr, 0, PAGE_SIZE);

memset(p, 0, sizeof(*p));
struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
unsigned long nr_pages;

vm->mm_ops = (struct kvm_pgtable_mm_ops) {

vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

vm->kvm.arch.mmu.pgd_phys = 0ULL;
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

if (params->hcr_el2 & HCR_VM)

params->vttbr = kvm_get_vttbr(mmu);
params->vtcr = mmu->vtcr;
params->hcr_el2 |= HCR_VM;

kvm_flush_dcache_to_poc(params, sizeof(*params));

write_sysreg(params->hcr_el2, hcr_el2);

asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
struct memblock_region *reg;

struct memblock_region *reg;
range->end = ULONG_MAX;

/* The list of memblock regions is sorted, binary search it */
while (left < right) {
	cur = (left + right) >> 1;
	reg = &hyp_memory[cur];
	end = reg->base + reg->size;
	if (addr < reg->base) {
		right = cur;
		range->end = reg->base;
	} else if (addr >= end) {
		left = cur + 1;
		range->start = end;
	} else {
		range->start = reg->base;
		range->end = end;
		return reg;
	}
}
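/*
 * find_mem_range() binary-searches the sorted hyp_memory[] memblock
 * array. On a hit it returns the containing region and sets *range to
 * that region's bounds; on a miss it returns NULL with *range narrowed
 * to the gap between the neighbouring regions, which lets the caller
 * id-map a faulting MMIO address without spilling into RAM.
 */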
struct memblock_region *reg;

return reg && !(reg->flags & MEMBLOCK_NOMAP);

return range->start <= addr && addr < range->end;
enum kvm_pgtable_prot prot)

#define host_stage2_try(fn, ...)				\
	({							\
		int __ret;					\
		hyp_assert_lock_held(&host_mmu.lock);		\
		__ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = host_stage2_unmap_dev_all();	\
			if (!__ret)				\
				__ret = fn(__VA_ARGS__);	\
		}						\
		__ret;						\
	})
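/*
 * The host_stage2_try() retry pattern: the page-table pool is sized to
 * cover memory, not the MMIO range, so a mapping attempt may return
 * -ENOMEM. In that case all device (MMIO) mappings are torn down to
 * recycle their page-table pages and the operation is retried once;
 * device mappings are recreated lazily on the next access fault.
 */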
if (kvm_pte_valid(pte))

u64 granule = kvm_granule_size(level);
cur.start = ALIGN_DOWN(addr, granule);
cur.end = cur.start + granule;
} while ((level <= KVM_PGTABLE_LAST_LEVEL) &&
	 !(kvm_level_supports_block_mapping(level) &&
	   range_included(&cur, range)));
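/*
 * host_stage2_adjust_range() walks from the level of the existing
 * (invalid) leaf towards smaller granules until the aligned block both
 * supports a block mapping and fits inside the range produced by
 * find_mem_range(), so the identity map is built with the largest
 * possible block and as few PTEs as possible.
 */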
enum kvm_pgtable_prot prot)

return prot != PKVM_HOST_MEM_PROT;

return prot != PKVM_HOST_MMIO_PROT;

enum kvm_pgtable_prot prot;

prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
struct kvm_vcpu_fault_info fault;

esr = read_sysreg_el2(SYS_ESR);

addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;

BUG_ON(ret && ret != -EAGAIN);
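/*
 * Fault address recovery: HPFAR_EL2 stores the faulting IPA's page frame
 * in its FIPA field starting at bit 4, so masking with HPFAR_MASK and
 * shifting left by 8 yields the page-aligned physical address. An
 * -EAGAIN result from host_stage2_idmap() is tolerated here: it means a
 * valid PTE was found, i.e. another CPU raced ahead and handled the
 * fault, so the host's access is simply replayed.
 */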
enum kvm_pgtable_walk_flags visit)

struct kvm_pgtable_walker walker = {
	.cb	= __check_page_state_visitor,
	.arg	= data,
	.flags	= KVM_PGTABLE_WALK_LEAF,
};

if (!kvm_pte_valid(pte) && pte)

enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
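/*
 * Page-state tracking: pkvm_mkstate() folds a pkvm_page_state (OWNED,
 * SHARED_OWNED, SHARED_BORROWED) into the software bits of the pgtable
 * prot, so ownership annotations travel with the mapping, and
 * pkvm_getstate() extracts them again. An invalid but non-zero PTE
 * (!kvm_pte_valid(pte) && pte) is also meaningful: it records the owner
 * of a page that is currently unmapped from this stage-2.
 */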
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
	 tx->initiator.id != PKVM_ID_HYP);

if (!kvm_pte_valid(pte))

return (ret != size) ? -EFAULT : 0;

return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
	 tx->initiator.id != PKVM_ID_HOST);
enum kvm_pgtable_prot perms)

if (perms != PAGE_HYP)

enum kvm_pgtable_prot perms)

void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot;

return (ret != size) ? -EFAULT : 0;
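/*
 * The (ret != size) idiom pairs with kvm_pgtable_hyp_unmap(), which
 * returns the number of bytes successfully unmapped rather than an
 * error code; anything short of the full range means the hyp mapping
 * was not in the expected state and is reported as -EFAULT. The
 * perms != PAGE_HYP check above likewise rejects shares requesting
 * anything other than the default RW hypervisor mapping.
 */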
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
u64 hyp_addr = (u64)__hyp_va(host_addr);

.completer_addr = hyp_addr,

.completer_prot = PAGE_HYP,

u64 hyp_addr = (u64)__hyp_va(host_addr);

.completer_addr = hyp_addr,

.completer_prot = PAGE_HYP,
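/*
 * These transition descriptors come from __pkvm_host_share_hyp() and
 * __pkvm_host_unshare_hyp(): the host initiates with a physical address
 * and the completer address is the hypervisor's linear alias of the
 * same page, computed with __hyp_va(). The completer protection is
 * always PAGE_HYP, matching the hyp_ack_share() check above.
 */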
u64 hyp_addr = (u64)__hyp_va(host_addr);

.completer_addr = hyp_addr,

u64 hyp_addr = (u64)__hyp_va(host_addr);

.completer_addr = host_addr,
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);

u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
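/*
 * hyp_pin_shared_mem() and hyp_unpin_shared_mem() round the [from, to)
 * window out to page boundaries, verify the range is in the expected
 * SHARED_OWNED (host) / SHARED_BORROWED (hyp) state, and then take or
 * drop a reference on each hyp_page. A pinned page makes the host's
 * unshare attempt fail with -EBUSY until EL2 is done with it.
 */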