KVM
pkvm.h File Reference
#include <asm/kvm_pkvm.h>
#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

Classes

struct  pkvm_hyp_vcpu
 
struct  pkvm_hyp_vm
 

Functions

static struct pkvm_hyp_vm * pkvm_hyp_vcpu_to_hyp_vm (struct pkvm_hyp_vcpu *hyp_vcpu)
 
void pkvm_hyp_vm_table_init (void *tbl)
 
int __pkvm_init_vm (struct kvm *host_kvm, unsigned long vm_hva, unsigned long pgd_hva)
 
int __pkvm_init_vcpu (pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unsigned long vcpu_hva)
 
int __pkvm_teardown_vm (pkvm_handle_t handle)
 
struct pkvm_hyp_vcpu * pkvm_load_hyp_vcpu (pkvm_handle_t handle, unsigned int vcpu_idx)
 
void pkvm_put_hyp_vcpu (struct pkvm_hyp_vcpu *hyp_vcpu)
 

Function Documentation

◆ __pkvm_init_vcpu()

int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, unsigned long vcpu_hva)

Definition at line 539 of file pkvm.c.

{
        struct pkvm_hyp_vcpu *hyp_vcpu;
        struct pkvm_hyp_vm *hyp_vm;
        unsigned int idx;
        int ret;

        hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
        if (!hyp_vcpu)
                return -ENOMEM;

        hyp_spin_lock(&vm_table_lock);

        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                ret = -ENOENT;
                goto unlock;
        }

        idx = hyp_vm->nr_vcpus;
        if (idx >= hyp_vm->kvm.created_vcpus) {
                ret = -EINVAL;
                goto unlock;
        }

        ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
        if (ret)
                goto unlock;

        hyp_vm->vcpus[idx] = hyp_vcpu;
        hyp_vm->nr_vcpus++;
unlock:
        hyp_spin_unlock(&vm_table_lock);

        if (ret)
                unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

        return ret;
}
References:
- static void *map_donated_memory(unsigned long host_va, size_t size) (pkvm.c:421)
- static void unmap_donated_memory(void *va, size_t size) (pkvm.c:437)
- static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, struct pkvm_hyp_vm *hyp_vm, struct kvm_vcpu *host_vcpu, unsigned int vcpu_idx) (pkvm.c:313)
- static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle) (pkvm.c:253)
- static void hyp_spin_unlock(hyp_spinlock_t *lock) (spinlock.h:82)
- static void hyp_spin_lock(hyp_spinlock_t *lock) (spinlock.h:44)
- unsigned int nr_vcpus (pkvm.h:44)
- struct kvm kvm (pkvm.h:29)
- struct pkvm_hyp_vcpu *vcpus[] (pkvm.h:47)
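
__pkvm_init_vcpu() runs at EL2 and is reached through a host hypercall; the host must first allocate the pages that will back the hypervisor's vCPU state and donate them via vcpu_hva. The following is an illustrative sketch of such a host-side caller, modelled on arch/arm64/kvm/pkvm.c (host_create_hyp_vcpu and the size parameter are placeholders, not verbatim kernel code):

static int host_create_hyp_vcpu(struct kvm_vcpu *vcpu, size_t hyp_vcpu_sz)
{
        pkvm_handle_t handle = vcpu->kvm->arch.pkvm.handle;
        void *hyp_vcpu;
        int ret;

        /* Page-backed allocation; ownership moves to EL2 on success. */
        hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
        if (!hyp_vcpu)
                return -ENOMEM;

        ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, vcpu, hyp_vcpu);
        if (ret)
                free_pages_exact(hyp_vcpu, hyp_vcpu_sz);

        return ret;
}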

◆ __pkvm_init_vm()

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, unsigned long pgd_hva)

Definition at line 470 of file pkvm.c.

{
        struct pkvm_hyp_vm *hyp_vm = NULL;
        size_t vm_size, pgd_size;
        unsigned int nr_vcpus;
        void *pgd = NULL;
        int ret;

        ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
        if (ret)
                return ret;

        nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
        if (nr_vcpus < 1) {
                ret = -EINVAL;
                goto err_unpin_kvm;
        }

        vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

        ret = -ENOMEM;

        hyp_vm = map_donated_memory(vm_hva, vm_size);
        if (!hyp_vm)
                goto err_remove_mappings;

        pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
        if (!pgd)
                goto err_remove_mappings;

        init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

        hyp_spin_lock(&vm_table_lock);
        ret = insert_vm_table_entry(host_kvm, hyp_vm);
        if (ret < 0)
                goto err_unlock;

        ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
        if (ret)
                goto err_remove_vm_table_entry;
        hyp_spin_unlock(&vm_table_lock);

        return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
        remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
        hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
        unmap_donated_memory(hyp_vm, vm_size);
        unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return ret;
}
References:
- static void remove_vm_table_entry(pkvm_handle_t handle) (pkvm.c:395)
- static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus) (pkvm.c:401)
- static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm) (pkvm.c:360)
- static void *map_donated_memory_noclear(unsigned long host_va, size_t size) (pkvm.c:407)
- static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, unsigned int nr_vcpus) (pkvm.c:305)
- void hyp_unpin_shared_mem(void *from, void *to) (mem_protect.c:1246)
- int hyp_pin_shared_mem(void *from, void *to) (mem_protect.c:1216)
- int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) (mem_protect.c:232)
- size_t kvm_pgtable_stage2_pgd_size(u64 vtcr) (pgtable.c:1561)
- struct kvm_arch arch (mem_protect.h:49)
- struct kvm *host_kvm (pkvm.h:32)
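
On success, __pkvm_init_vm() returns the newly allocated pkvm handle (a positive value) rather than 0; the host records it for all later hypercalls targeting this VM. A minimal sketch of the host side (illustrative; host_create_hyp_vm is a placeholder name, and the vm_hva/pgd_hva donations are assumed to have been allocated as in arch/arm64/kvm/pkvm.c):

static int host_create_hyp_vm(struct kvm *host_kvm, void *hyp_vm, void *pgd)
{
        int ret;

        ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
        if (ret < 0)
                return ret;

        /* The hypercall returns the pkvm handle on success. */
        host_kvm->arch.pkvm.handle = ret;
        return 0;
}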

◆ __pkvm_teardown_vm()

int __pkvm_teardown_vm(pkvm_handle_t handle)

Definition at line 592 of file pkvm.c.

{
        struct kvm_hyp_memcache *mc;
        struct pkvm_hyp_vm *hyp_vm;
        struct kvm *host_kvm;
        unsigned int idx;
        size_t vm_size;
        int err;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (WARN_ON(hyp_page_count(hyp_vm))) {
                err = -EBUSY;
                goto err_unlock;
        }

        host_kvm = hyp_vm->host_kvm;

        /* Ensure the VMID is clean before it can be reallocated */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
        remove_vm_table_entry(handle);
        hyp_spin_unlock(&vm_table_lock);

        /* Reclaim guest pages (including page-table pages) */
        mc = &host_kvm->arch.pkvm.teardown_mc;
        reclaim_guest_pages(hyp_vm, mc);
        unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

        /* Push the metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

                teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
        }

        vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
        teardown_donated_memory(mc, hyp_vm, vm_size);
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return 0;

err_unlock:
        hyp_spin_unlock(&vm_table_lock);
        return err;
}
References:
- static void teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size) (pkvm.c:581)
- static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[], unsigned int nr_vcpus) (pkvm.c:296)
- void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc) (mem_protect.c:269)
- static int hyp_page_count(void *addr) (memory.h:45)
- void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) (tlb.c:168)
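
The metadata pages are only pushed into teardown_mc here; it is the host's job to drain that memcache once the hypercall has succeeded. A sketch of the host-side teardown (illustrative, modelled on arch/arm64/kvm/pkvm.c; host_destroy_hyp_vm is a placeholder name):

static void host_destroy_hyp_vm(struct kvm *host_kvm)
{
        if (host_kvm->arch.pkvm.handle) {
                /* -EBUSY here would indicate a leaked vCPU reference. */
                WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
                                          host_kvm->arch.pkvm.handle));
        }

        host_kvm->arch.pkvm.handle = 0;

        /* Reclaim the donated metadata pages pushed into teardown_mc. */
        free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
}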

◆ pkvm_hyp_vcpu_to_hyp_vm()

static inline struct pkvm_hyp_vm *pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)

Definition at line 51 of file pkvm.h.

{
        return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}
References:
- struct kvm_vcpu vcpu (pkvm.h:19)
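
This works because struct pkvm_hyp_vm embeds the guest's struct kvm as its kvm member, and each hypervisor vCPU's vcpu.kvm pointer targets that embedded instance; container_of() subtracts the member offset to recover the enclosing structure. A self-contained sketch of the same idiom (demo types only, not the kernel's):

#include <stddef.h>

#define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_kvm { int id; };

struct demo_hyp_vm {
        struct demo_kvm kvm;    /* embedded, like pkvm_hyp_vm::kvm */
        unsigned int nr_vcpus;
};

static struct demo_hyp_vm *demo_to_hyp_vm(struct demo_kvm *kvm)
{
        /* Recover the enclosing demo_hyp_vm from its embedded member. */
        return demo_container_of(kvm, struct demo_hyp_vm, kvm);
}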

◆ pkvm_hyp_vm_table_init()

void pkvm_hyp_vm_table_init(void *tbl)

Definition at line 244 of file pkvm.c.

{
        WARN_ON(vm_table);
        vm_table = tbl;
}
References:
- static struct pkvm_hyp_vm **vm_table (pkvm.c:242)
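
vm_table is a host-donated, page-aligned array of VM pointers, installed exactly once during hypervisor initialisation (hence the WARN_ON). A sketch of how the host might size that donation, assuming an upstream-style KVM_MAX_PVMS cap of 255 (both the constant's value and the helper name are assumptions, not confirmed by this file):

#define KVM_MAX_PVMS    255     /* assumed cap on simultaneous protected VMs */

/* Pages needed to hold KVM_MAX_PVMS 'struct pkvm_hyp_vm *' entries. */
static unsigned long hyp_vm_table_pages(void)
{
        return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}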

◆ pkvm_load_hyp_vcpu()

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle, unsigned int vcpu_idx)

Definition at line 263 of file pkvm.c.

{
        struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
        struct pkvm_hyp_vm *hyp_vm;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
                goto unlock;

        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
        hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
        hyp_spin_unlock(&vm_table_lock);
        return hyp_vcpu;
}
References:
- #define hyp_virt_to_page(virt) (memory.h:33)
- static void hyp_page_ref_inc(struct hyp_page *p) (memory.h:52)

◆ pkvm_put_hyp_vcpu()

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)

Definition at line 281 of file pkvm.c.

{
        struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

        hyp_spin_lock(&vm_table_lock);
        hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
        hyp_spin_unlock(&vm_table_lock);
}
References:
- static void hyp_page_ref_dec(struct hyp_page *p) (memory.h:58)
- static struct pkvm_hyp_vm *pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu) (pkvm.h:51)
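
pkvm_load_hyp_vcpu() and pkvm_put_hyp_vcpu() form a reference-counted pair: the load raises the refcount on the page backing the VM's metadata, which is what makes __pkvm_teardown_vm() return -EBUSY while any vCPU is loaded, and the put drops it again. A sketch of the pattern from a hypothetical EL2 caller (run_vcpu_once is a placeholder):

static int run_vcpu_once(pkvm_handle_t handle, unsigned int vcpu_idx)
{
        struct pkvm_hyp_vcpu *hyp_vcpu;

        hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
        if (!hyp_vcpu)
                return -EINVAL;

        /* ... switch to the guest and handle its exits ... */

        /* Balance the reference taken by pkvm_load_hyp_vcpu(). */
        pkvm_put_hyp_vcpu(hyp_vcpu);
        return 0;
}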