10 #include <linux/arm-smccc.h>
11 #include <linux/kvm_host.h>
12 #include <linux/types.h>
13 #include <linux/jump_label.h>
14 #include <uapi/linux/psci.h>
18 #include <asm/barrier.h>
19 #include <asm/cpufeature.h>
20 #include <asm/kprobes.h>
21 #include <asm/kvm_asm.h>
22 #include <asm/kvm_emulate.h>
23 #include <asm/kvm_hyp.h>
24 #include <asm/kvm_mmu.h>
25 #include <asm/fpsimd.h>
26 #include <asm/debug-monitors.h>
27 #include <asm/processor.h>
46 val = vcpu->arch.cptr_el2;
48 val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
49 if (cpus_have_final_cap(ARM64_SME)) {
51 val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
58 val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
59 CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
61 val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
66 kvm_write_cptr_el2(val);
67 write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
69 if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
70 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
78 write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
80 write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
86 extern char __kvm_hyp_host_vector[];
90 if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
99 val = read_sysreg_el1(SYS_TCR);
100 write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
102 val = read_sysreg_el1(SYS_SCTLR);
103 write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
109 write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
111 kvm_reset_cptr_el2(vcpu);
112 write_sysreg(__kvm_hyp_host_vector, vbar_el2);
136 #ifdef CONFIG_HW_PERF_EVENTS
139 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
141 if (pmu->events_host)
142 write_sysreg(pmu->events_host, pmcntenclr_el0);
144 if (pmu->events_guest)
145 write_sysreg(pmu->events_guest, pmcntenset_el0);
147 return (pmu->events_host || pmu->events_guest);
155 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;
157 if (pmu->events_guest)
158 write_sysreg(pmu->events_guest, pmcntenclr_el0);
160 if (pmu->events_host)
161 write_sysreg(pmu->events_host, pmcntenset_el0);
/*
 * Stubs for when CONFIG_HW_PERF_EVENTS is not set: there is no host PMU
 * state to save/restore around the guest run loop.  The guest-entry stub
 * evaluates to false so the caller's "PMU switch needed" flag stays clear;
 * the host-exit stub expands to nothing.
 */
#define __pmu_switch_to_guest(v) ({ false; })
#define __pmu_switch_to_host(v) do {} while (0)
186 [0 ... ESR_ELx_EC_MAX] = NULL,
199 [0 ... ESR_ELx_EC_MAX] = NULL,
212 if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
231 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
233 if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
241 vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
242 *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
243 *exit_code |= ARM_EXCEPTION_IL;
250 struct kvm_cpu_context *host_ctxt;
251 struct kvm_cpu_context *guest_ctxt;
252 struct kvm_s2_mmu *mmu;
253 bool pmu_switch_needed;
262 if (system_uses_irq_prio_masking()) {
263 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
267 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
268 host_ctxt->__hyp_running_vcpu = vcpu;
269 guest_ctxt = &vcpu->arch.ctxt;
307 mmu = kern_hyp_va(vcpu->arch.hw_mmu);
308 __load_stage2(mmu, kern_hyp_va(mmu->arch));
318 exit_code = __guest_enter(vcpu);
340 if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
350 if (pmu_switch_needed)
354 if (system_uses_irq_prio_masking())
355 gic_write_pmr(GIC_PRIO_IRQOFF);
357 host_ctxt->__hyp_running_vcpu = NULL;
364 u64 spsr = read_sysreg_el2(SYS_SPSR);
365 u64 elr = read_sysreg_el2(SYS_ELR);
366 u64 par = read_sysreg_par();
367 struct kvm_cpu_context *host_ctxt;
368 struct kvm_vcpu *vcpu;
370 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
371 vcpu = host_ctxt->__hyp_running_vcpu;
384 __hyp_do_panic(host_ctxt, spsr, elr, par);
struct vgic_global kvm_vgic_global_state
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
static __always_inline void __load_host_stage2(void)
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
static void __activate_traps(struct kvm_vcpu *vcpu)
static const exit_handler_fn pvm_exit_handlers[]
asmlinkage void kvm_unexpected_el2_exception(void)
asmlinkage void __noreturn hyp_panic_bad_stack(void)
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
static const exit_handler_fn hyp_exit_handlers[]
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
#define __pmu_switch_to_host(v)
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data)
asmlinkage void __noreturn hyp_panic(void)
#define __pmu_switch_to_guest(v)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
static const exit_handler_fn * kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
void __timer_disable_traps(struct kvm_vcpu *vcpu)
void __timer_enable_traps(struct kvm_vcpu *vcpu)
struct static_key_false gicv3_cpuif
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) __alias(kvm_hyp_handle_memory_fault)
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
bool(* exit_handler_fn)(struct kvm_vcpu *, u64 *)
static void __deactivate_traps_common(struct kvm_vcpu *vcpu)
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
static void __activate_traps_common(struct kvm_vcpu *vcpu)
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) __alias(kvm_hyp_handle_memory_fault)
static void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
static bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static void ___deactivate_traps(struct kvm_vcpu *vcpu)
static void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
static void ___activate_traps(struct kvm_vcpu *vcpu)
static void __kvm_unexpected_el2_exception(void)
static bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
static void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
static void __sysreg32_save_state(struct kvm_vcpu *vcpu)
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)