#include <linux/kvm_host.h>
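/*
 * Remaining includes and file-scope declarations, reconstructed from the
 * helpers and globals this file references (kern_hyp_va(), hyp_spin_lock(),
 * __pkvm_host_donate_hyp(), host_mmu, ...); the exact set may differ
 * slightly between kernel versions.
 */
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;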
/* Set trap register values based on features in ID_AA64PFR0. */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
        u64 hcr_set = HCR_RW;
        u64 hcr_clear = 0;
        u64 cptr_set = 0;
        u64 cptr_clear = 0;

        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
                     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
                     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

        /* Linux guests assume support for floating-point and Advanced SIMD. */
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
                                PVM_ID_AA64PFR0_ALLOW));
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
                                PVM_ID_AA64PFR0_ALLOW));

        /* Trap RAS unless all current versions are supported. */
        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
            ID_AA64PFR0_EL1_RAS_V1P1) {
                hcr_set |= HCR_TERR | HCR_TEA;
                hcr_clear |= HCR_FIEN;
        }

        /* Trap AMU. */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
                hcr_clear |= HCR_AMVOFFEN;
                cptr_set |= CPTR_EL2_TAM;
        }

        /* Trap SVE. */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
                        cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
        vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/* Set trap register values based on features in ID_AA64PFR1. */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
        u64 hcr_set = 0, hcr_clear = 0;

        /* Memory Tagging: Trap and Treat as Untagged if not supported. */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
                hcr_set |= HCR_TID5;
                hcr_clear |= HCR_DCT | HCR_ATA;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
}
/* Set trap register values based on features in ID_AA64DFR0. */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
        u64 mdcr_set = 0;
        u64 mdcr_clear = 0;
        u64 cptr_set = 0;

        /* Trap/constrain PMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
                mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
                              MDCR_EL2_HPMN_MASK;
        }

        /* Trap Debug */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
                mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

        /* Trap OS Double Lock */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
                mdcr_set |= MDCR_EL2_TDOSA;

        /* Trap SPE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPMS;
                mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
        }

        /* Trap Trace Filter */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
                mdcr_set |= MDCR_EL2_TTRF;

        /* Trap Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
                if (has_hvhe())
                        cptr_set |= CPACR_EL1_TTA;
                else
                        cptr_set |= CPTR_EL2_TTA;
        }

        /* Trap External Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
                mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
}
/* Set trap register values based on features in ID_AA64MMFR0. */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
        u64 mdcr_set = 0;

        /* Trap Debug Communications Channel registers */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
                mdcr_set |= MDCR_EL2_TDCC;

        vcpu->arch.mdcr_el2 |= mdcr_set;
}
/* Set trap register values based on features in ID_AA64MMFR1. */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
        u64 hcr_set = 0;

        /* Trap LOR */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
                hcr_set |= HCR_TLOR;

        vcpu->arch.hcr_el2 |= hcr_set;
}
/* Set baseline trap register values. */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
        const u64 hcr_trap_feat_regs = HCR_TID3;
        const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

        /* Always trap feature ID registers and implementation-defined features. */
        vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

        /* Clear res0 and set res1 bits to trap potential new features. */
        vcpu->arch.hcr_el2 &= ~(HCR_RES0);
        vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);

        if (!has_hvhe()) {
                vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
                vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
        }
}
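/*
 * Likely dispatcher for the per-ID-register helpers above; a sketch
 * reconstructed from the __pkvm_vcpu_init_traps() symbol this file exports,
 * so the exact body may differ between kernel versions.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
        pvm_init_trap_regs(vcpu);
        pvm_init_traps_aa64pfr0(vcpu);
        pvm_init_traps_aa64pfr1(vcpu);
        pvm_init_traps_aa64dfr0(vcpu);
        pvm_init_traps_aa64mmfr0(vcpu);
        pvm_init_traps_aa64mmfr1(vcpu);
}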
#define HANDLE_OFFSET 0x1000
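/*
 * Handle <-> VM-table-index conversion helpers plus the VM table itself.
 * Sketches based on the symbols this file defines (vm_handle_to_idx,
 * idx_to_vm_handle, vm_table, vm_table_lock, pkvm_hyp_vm_table_init);
 * exact comments and layout may differ by kernel version.
 */
static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
        return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
        return idx + HANDLE_OFFSET;
}

/* Protects accesses to 'vm_table'. */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
        WARN_ON(vm_table);
        vm_table = tbl;
}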
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
        unsigned int idx = vm_handle_to_idx(handle);

        if (unlikely(idx >= KVM_MAX_PVMS))
                return NULL;

        return vm_table[idx];
}
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
                                         unsigned int vcpu_idx)
{
        struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
        struct pkvm_hyp_vm *hyp_vm;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
                goto unlock;

        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
        hyp_page_ref_inc(hyp_virt_to_page(hyp_vcpu));
unlock:
        hyp_spin_unlock(&vm_table_lock);
        return hyp_vcpu;
}
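/*
 * Counterparts referenced elsewhere in this file: the vCPU put path and the
 * host-vCPU unpinning helpers. Sketches based on the symbols the file
 * defines (pkvm_put_hyp_vcpu, unpin_host_vcpu, unpin_host_vcpus); bodies
 * may differ slightly between kernel versions.
 */
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        hyp_spin_lock(&vm_table_lock);
        hyp_page_ref_dec(hyp_virt_to_page(hyp_vcpu));
        hyp_spin_unlock(&vm_table_lock);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
        if (host_vcpu)
                hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
                             unsigned int nr_vcpus)
{
        int i;

        for (i = 0; i < nr_vcpus; i++)
                unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}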
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
                              struct pkvm_hyp_vm *hyp_vm,
                              struct kvm_vcpu *host_vcpu,
                              unsigned int vcpu_idx)
{
        int ret = 0;

        if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
                return -EBUSY;

        if (host_vcpu->vcpu_idx != vcpu_idx) {
                ret = -EINVAL;
                goto done;
        }

        hyp_vcpu->host_vcpu = host_vcpu;

        hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
        hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
        hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

        hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
        if (ret)
                unpin_host_vcpu(host_vcpu);
        return ret;
}
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_PVMS; ++i) {
                if (!vm_table[i])
                        return i;
        }

        return -ENOMEM;
}
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
                                           struct pkvm_hyp_vm *hyp_vm)
{
        struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
        int idx;

        hyp_assert_lock_held(&vm_table_lock);

        idx = find_free_vm_table_entry(host_kvm);
        if (idx < 0)
                return idx;

        hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

        /* VMID 0 is reserved for the host */
        atomic64_set(&mmu->vmid.id, idx + 1);

        mmu->arch = &hyp_vm->kvm.arch;
        mmu->pgt = &hyp_vm->pgt;

        vm_table[idx] = hyp_vm;
        return hyp_vm->kvm.arch.pkvm.handle;
}
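/*
 * Helpers used by the VM setup/teardown paths below: removing a VM table
 * entry, sizing the donated hyp VM structure, and initializing the hyp copy
 * of the VM. Sketches based on the symbols this file defines
 * (remove_vm_table_entry, pkvm_get_hyp_vm_size, init_pkvm_hyp_vm); the
 * exact bodies may differ between kernel versions.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
        hyp_assert_lock_held(&vm_table_lock);
        vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
        return size_add(sizeof(struct pkvm_hyp_vm),
                        size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
                             unsigned int nr_vcpus)
{
        hyp_vm->host_kvm = host_kvm;
        hyp_vm->kvm.created_vcpus = nr_vcpus;
        hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}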
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
        void *va = (void *)kern_hyp_va(host_va);

        if (!PAGE_ALIGNED(va))
                return NULL;

        if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
                                   PAGE_ALIGN(size) >> PAGE_SHIFT))
                return NULL;

        return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
        WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
                                       PAGE_ALIGN(size) >> PAGE_SHIFT));
}
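/*
 * Clearing wrappers around the donation helpers above. Sketches based on
 * the map_donated_memory(), unmap_donated_memory() and
 * unmap_donated_memory_noclear() symbols this file defines; they zero the
 * donated pages on both the map and unmap paths.
 */
static void *map_donated_memory(unsigned long host_va, size_t size)
{
        void *va = map_donated_memory_noclear(host_va, size);

        if (va)
                memset(va, 0, size);

        return va;
}

static void unmap_donated_memory(void *va, size_t size)
{
        if (!va)
                return;

        memset(va, 0, size);
        __unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
        if (!va)
                return;

        __unmap_donated_memory(va, size);
}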
/*
 * Initialize the hypervisor copy of the protected VM state using memory
 * donated by the host, and unmap that memory from the host at stage 2.
 * Returns a unique handle to the protected VM on success, or a negative
 * error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                   unsigned long pgd_hva)
{
        struct pkvm_hyp_vm *hyp_vm = NULL;
        size_t vm_size, pgd_size;
        unsigned int nr_vcpus;
        void *pgd = NULL;
        int ret;

        ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
        if (ret)
                return ret;

        nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
        if (nr_vcpus < 1) {
                ret = -EINVAL;
                goto err_unpin_kvm;
        }

        vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

        ret = -ENOMEM;

        hyp_vm = map_donated_memory(vm_hva, vm_size);
        if (!hyp_vm)
                goto err_remove_mappings;

        pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
        if (!pgd)
                goto err_remove_mappings;

        init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

        hyp_spin_lock(&vm_table_lock);
        ret = insert_vm_table_entry(host_kvm, hyp_vm);
        if (ret < 0)
                goto err_unlock;

        ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
        if (ret)
                goto err_remove_vm_table_entry;
        hyp_spin_unlock(&vm_table_lock);

        return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
        remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
        hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
        unmap_donated_memory(hyp_vm, vm_size);
        unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return ret;
}
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
                     unsigned long vcpu_hva)
{
        struct pkvm_hyp_vcpu *hyp_vcpu;
        struct pkvm_hyp_vm *hyp_vm;
        unsigned int idx;
        int ret;

        hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
        if (!hyp_vcpu)
                return -ENOMEM;

        hyp_spin_lock(&vm_table_lock);

        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                ret = -ENOENT;
                goto unlock;
        }

        idx = hyp_vm->nr_vcpus;
        if (idx >= hyp_vm->kvm.created_vcpus) {
                ret = -EINVAL;
                goto unlock;
        }

        ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
        if (ret)
                goto unlock;

        hyp_vm->vcpus[idx] = hyp_vcpu;
        hyp_vm->nr_vcpus++;
unlock:
        hyp_spin_unlock(&vm_table_lock);

        if (ret)
                unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

        return ret;
}
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
        size = PAGE_ALIGN(size);
        memset(addr, 0, size);

        for (void *start = addr; start < addr + size; start += PAGE_SIZE)
                push_hyp_memcache(mc, start, hyp_virt_to_phys);

        unmap_donated_memory_noclear(addr, size);
}
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
        struct kvm_hyp_memcache *mc;
        struct pkvm_hyp_vm *hyp_vm;
        struct kvm *host_kvm;
        unsigned int idx;
        size_t vm_size;
        int err;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (WARN_ON(hyp_page_count(hyp_vm))) {
                err = -EBUSY;
                goto err_unlock;
        }

        host_kvm = hyp_vm->host_kvm;

        /* Ensure the VMID is clean before it can be reallocated */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
        remove_vm_table_entry(handle);
        hyp_spin_unlock(&vm_table_lock);

        /* Reclaim guest pages (including page-table pages) */
        mc = &host_kvm->arch.pkvm.teardown_mc;
        reclaim_guest_pages(hyp_vm, mc);
        unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

        /* Push the vCPU metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

                teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
        }

        vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
        teardown_donated_memory(mc, hyp_vm, vm_size);
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return 0;

err_unlock:
        hyp_spin_unlock(&vm_table_lock);
        return err;
}