size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
struct kvm_vcpu *host_vcpu;
pkvm_handle_t handle;
...

if (host_kvm->created_vcpus < 1)
	return -EINVAL;
...
pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
...
/* Room for the hyp VM struct plus one pointer per created vCPU. */
hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
				size_mul(sizeof(void *),
					 host_kvm->created_vcpus)));
hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
...
host_kvm->arch.pkvm.handle = handle;
...
hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
	...
	/* vCPU indices must be sequential, starting at 0. */
	if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
		...
	}
	hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
	...
		free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
	...
}
...
free_vm:
	free_pages_exact(hyp_vm, hyp_vm_sz);
free_pgd:
	free_pages_exact(pgd, pgd_sz);
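
The ... gaps above are mostly allocation-failure checks, plus the two kvm_call_hyp_nvhe() hypercalls that actually donate this memory to EL2 and yield the VM handle. The following sketch reconstructs those elided pieces to match the surrounding code; take the exact branch structure as an assumption rather than a verbatim quote.

/* Donate the VM pages to EL2; a non-negative return is the new VM handle. */
ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
if (ret < 0)
	goto free_vm;
handle = ret;

/* ...and, inside the vCPU loop, one donation call per vCPU: */
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu, hyp_vcpu);
if (ret) {
	free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
	goto destroy_vm;
}

A per-vCPU failure takes the destroy_vm path (not shown), which tears the partially initialized VM back down at EL2 via __pkvm_destroy_hyp_vm(); the free_vm/free_pgd labels at the end of the listing only cover failures that happen before the VM memory has been donated.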
/* For reference, the signatures involved. The first two are the EL2 (nVHE)
 * entry points reached via kvm_call_hyp_nvhe(); the host's pointer arguments
 * arrive there as host virtual addresses: */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unsigned long vcpu_hva);

/* Shared page-table helper that derives the stage-2 PGD size from VTCR_EL2: */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/* Host-side teardown counterpart (also used on the error path above): */
static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm);
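
On the hyp side, the vm_hva/pgd_hva/vcpu_hva arguments are host kernel virtual addresses: EL2 converts them to its own alias of the pages and takes ownership before touching the contents. A minimal sketch of that admission step, assuming helpers along the lines of recent mainline (kern_hyp_va(), hyp_virt_to_pfn() and __pkvm_host_donate_hyp() exist upstream; the wrapper below is illustrative):

/* Illustrative: admit a host-donated buffer at EL2. The wrapper name and
 * exact checks are assumptions, not a verbatim quote of __pkvm_init_vm. */
static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	/* Transfer page ownership from host to hyp; after this the pages are
	 * unmapped from the host's stage-2 and safe for EL2 to trust. */
	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

Donation, rather than sharing, is the point: once these pages belong to the hypervisor, the host can no longer read or modify the hyp VM and vCPU state it just allocated.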