7 #include <linux/init.h>
8 #include <linux/kmemleak.h>
9 #include <linux/kvm_host.h>
10 #include <linux/memblock.h>
11 #include <linux/mutex.h>
12 #include <linux/sort.h>
14 #include <asm/kvm_pkvm.h>
16 #include "hyp_constants.h"
/*
 * Fragment of cmp_hyp_memblock() (signature listed later in this extract:
 * static int cmp_hyp_memblock(const void *p1, const void *p2)).
 * Three-way comparator over struct memblock_region, ordering by ->base
 * ascending: returns -1 when r1->base < r2->base, else 1 or 0 from the
 * relational expression. Shape matches a sort() callback.
 */
28 const struct memblock_region *r1 = p1;
29 const struct memblock_region *r2 = p2;
31 return r1->base < r2->base ? -1 : (r1->base > r2->base);
/*
 * Fragment (original lines ~38): the element-size argument,
 * sizeof(struct memblock_region), of a call whose other arguments are not
 * visible here — presumably the sort() inside sort_memblock_regions(),
 * paired with the cmp_hyp_memblock comparator above. TODO confirm against
 * the full file.
 */
38 sizeof(
struct memblock_region),
/*
 * Fragment of register_memblock_regions(): walks every kernel memblock
 * region via for_each_mem_region() and bumps the shared region counter
 * through hyp_memblock_nr_ptr. The loop body lines between the iterator
 * (orig. line 47) and the increment (orig. line 52) — presumably the copy
 * into the hyp_memory array and a capacity check — are missing from this
 * extract.
 */
45 struct memblock_region *reg;
47 for_each_mem_region(reg) {
52 (*hyp_memblock_nr_ptr)++;
/*
 * Fragment of kvm_hyp_reserve() (early-boot __init per the signature list
 * below): sizes and reserves the memory carve-out for the pKVM hypervisor.
 */
61 u64 hyp_mem_pages = 0;
/*
 * Guard: only relevant when hyp mode is available and the kernel is NOT
 * itself running at EL2 (i.e. nVHE); the guarded statement (a return,
 * presumably) is not visible in this extract.
 */
64 if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
/* Error path for the register_memblock_regions() call (call site not
 * visible here; 'ret' is its result — TODO confirm). */
73 kvm_err(
"Failed to register hyp memblocks: %d\n", ret);
/*
 * Accumulate the hyp page budget: hyp stage-1 page-tables, host stage-2
 * page-tables, the VM handle table, the hyp vmemmap (sized from
 * STRUCT_HYP_PAGE_SIZE), and FF-A proxy buffers.
 */
77 hyp_mem_pages += hyp_s1_pgtable_pages();
78 hyp_mem_pages += host_s2_pgtable_pages();
79 hyp_mem_pages += hyp_vm_table_pages();
80 hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
81 hyp_mem_pages += hyp_ffa_proxy_pages();
/* Failure report for the (not visible) memblock allocation of the
 * carve-out. */
96 kvm_err(
"Failed to reserve hyp memory\n");
/* Success report: size in MiB and base address (hyp_mem_size is defined
 * outside this extract; the base-address argument is also cut off). */
100 kvm_info(
"Reserved %lld MiB at 0x%llx\n",
hyp_mem_size >> 20,
/*
 * Fragment of __pkvm_destroy_hyp_vm(): if a hyp VM handle was ever
 * obtained, a teardown hypercall taking that handle is issued (only the
 * trailing argument of the call is visible at orig. line 108 — presumably
 * kvm_call_hyp_nvhe(__pkvm_teardown_vm, ...); TODO confirm), then the
 * handle is cleared so the VM reads as "no hyp shadow exists".
 */
106 if (host_kvm->arch.pkvm.handle) {
108 host_kvm->arch.pkvm.handle));
111 host_kvm->arch.pkvm.handle = 0;
/*
 * Fragment of __pkvm_create_hyp_vm(): donates page-granular memory to the
 * hypervisor for a protected VM (stage-2 PGD, per-VM state, per-vCPU
 * state). Many statements are missing from this extract (line numbers jump
 * 134->144, 152->165, 178->187 ...), including the NULL checks after each
 * alloc_pages_exact() and the hypercalls themselves.
 */
127 size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
128 struct kvm_vcpu *host_vcpu;
129 pkvm_handle_t handle;
/* Reject a VM with no vCPUs (the error return itself is not visible). */
134 if (host_kvm->created_vcpus < 1)
/* Stage-2 PGD backing; pgd_sz is computed on a line not shown here —
 * presumably from kvm_pgtable_stage2_pgd_size() (listed below). */
144 pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
/*
 * Per-VM hyp state: fixed PKVM_HYP_VM_SIZE plus one pointer per created
 * vCPU, using the overflow-checked size_add()/size_mul() helpers, rounded
 * up to a page.
 */
149 hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
150 size_mul(
sizeof(
void *),
151 host_kvm->created_vcpus)));
152 hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
/* Record the handle returned by the (not visible) VM-init hypercall. */
165 host_kvm->arch.pkvm.handle = handle;
/* Per-vCPU donation loop. */
168 hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
169 kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
/* Sanity: vCPU indices must be dense and sequential from 0. */
173 if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
178 hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
/* Error-path frees (the goto-cleanup labels around them are not visible):
 * per-vCPU buffer, per-VM buffer, then the PGD. */
187 free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
198 free_pages_exact(hyp_vm, hyp_vm_sz);
200 free_pages_exact(pgd, pgd_sz);
/*
 * Fragment of pkvm_create_hyp_vm(): under arch.config_lock, create the hyp
 * shadow VM only if no handle exists yet (the guarded call — presumably
 * __pkvm_create_hyp_vm(host_kvm) — sits on the missing line 210).
 */
208 mutex_lock(&host_kvm->arch.config_lock);
209 if (!host_kvm->arch.pkvm.handle)
211 mutex_unlock(&host_kvm->arch.config_lock);
/*
 * Fragment of pkvm_destroy_hyp_vm(): the teardown call between the
 * lock/unlock pair (missing line 219 — presumably
 * __pkvm_destroy_hyp_vm(host_kvm)) runs under arch.config_lock.
 */
218 mutex_lock(&host_kvm->arch.config_lock);
220 mutex_unlock(&host_kvm->arch.config_lock);
/* Fragment of pkvm_init_host_vm(): initializes the host VM's lock; any
 * surrounding statements/return are not visible in this extract. */
225 mutex_init(&host_kvm->lock);
/* Fragment of _kvm_host_prot_finalize() (runs per-CPU, per the callers
 * below): publishes -EINVAL through the shared error pointer with
 * WRITE_ONCE; the failing condition it reports on is not visible here. */
234 WRITE_ONCE(*err, -EINVAL);
/* Fragment of pkvm_drop_host_privileges(): flips the
 * kvm_protected_mode_initialized static key (defined in this file, per the
 * DEFINE_STATIC_KEY_FALSE entry below) before the per-CPU finalize pass —
 * presumably because patching may not be possible afterwards; TODO confirm
 * against the full file. */
245 static_branch_enable(&kvm_protected_mode_initialized);
/*
 * Fragment of finalize_pkvm() (a device_initcall_sync, per the entry
 * below): removes the hyp BSS range from kmemleak scanning — presumably
 * because it becomes inaccessible to the host once protection is finalized
 * — and reports failure of the finalize step ('ret' source not visible).
 */
261 kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
266 pr_err(
"Failed to finalize Hyp protection: %d\n", ret);
/*
 * NOTE(review): the lines below are not executable code from the original
 * file — they read as an extracted index of the symbols defined (or
 * referenced) by it: public entry points, __init helpers, static
 * file-local helpers, and two file-scope data definitions. Bodies are not
 * included in this extract. Kept verbatim.
 */
bool is_kvm_arm_initialised(void)
enum kvm_mode kvm_get_mode(void)
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, unsigned long pgd_hva)
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unsigned long vcpu_hva)
int __pkvm_teardown_vm(pkvm_handle_t handle)
int __pkvm_prot_finalize(void)
unsigned int hyp_memblock_nr
void free_hyp_memcache(struct kvm_hyp_memcache *mc)
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
void __init kvm_hyp_reserve(void)
device_initcall_sync(finalize_pkvm)
static unsigned int * hyp_memblock_nr_ptr
static void __init _kvm_host_prot_finalize(void *arg)
static int cmp_hyp_memblock(const void *p1, const void *p2)
static void __init sort_memblock_regions(void)
int pkvm_create_hyp_vm(struct kvm *host_kvm)
static int __init pkvm_drop_host_privileges(void)
static int __init register_memblock_regions(void)
static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
static int __init finalize_pkvm(void)
static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized)
int pkvm_init_host_vm(struct kvm *host_kvm)
static struct memblock_region * hyp_memory