#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>
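
/*
 * Hypervisor fixup table for instructions that may legitimately fault
 * while running at EL2. Each entry records a faulting instruction and
 * its fixup handler as section-relative offsets; the table is walked
 * by __kvm_unexpected_el2_exception() at the bottom of this file.
 */
struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
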
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}

static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}
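
/*
 * We are about to trap all floating point register accesses to EL2,
 * however, traps are only taken to EL2 if the operation would not
 * otherwise trap to EL1. Therefore, for 32-bit guests, set FPEXC.EN
 * so that FP accesses cannot trap to EL1 instead. If FP/ASIMD is not
 * implemented, FPEXC is UNDEFINED and any access to it will cause an
 * exception, hence the system_supports_fpsimd() check.
 */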
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
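
/*
 * Fine Grained Trap (FGT) registers mix positive-polarity bits (bit set
 * causes a trap, covered by __<reg>_MASK) and negative-polarity bits
 * (bit *clear* causes a trap, covered by __<reg>_nMASK). Given a guest
 * hypervisor's view of an FGT register, derive the bits that must be
 * set and cleared in the value the host actually programs.
 */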
#define compute_clr_set(vcpu, reg, clr, set)				\
	do {								\
		u64 hfg;						\
		hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0;	\
		set |= hfg & __ ## reg ## _MASK;			\
		clr |= ~hfg & __ ## reg ## _nMASK;			\
	} while(0)

#define update_fgt_traps_cs(vcpu, reg, clr, set)			\
	do {								\
		struct kvm_cpu_context *hctxt =				\
			&this_cpu_ptr(&kvm_host_data)->host_ctxt;	\
		u64 c = 0, s = 0;					\
									\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		compute_clr_set(vcpu, reg, c, s);			\
		s |= set;						\
		c |= clr;						\
		if (c || s) {						\
			u64 val = __ ## reg ## _nMASK;			\
			val |= s;					\
			val &= ~c;					\
			write_sysreg_s(val, SYS_ ## reg);		\
		}							\
	} while(0)

#define update_fgt_traps(vcpu, reg)		\
	update_fgt_traps_cs(vcpu, reg, 0, 0)
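
/*
 * Validate the fine grain trap masks: a register's positive and negative
 * polarity masks must not overlap, and together with its RES0 bits they
 * must account for all 64 bits, e.g. CHECK_FGT_MASKS(HFGRTR_EL2).
 */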
#define CHECK_FGT_MASKS(reg)							\
	do {									\
		BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK));	\
		BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^	\
			       (__ ## reg ## _nMASK)));				\
	} while(0)

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
		ID_AA64PFR0_EL1_AMU_SHIFT);
}
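
/*
 * Save the host's FGT configuration and install the guest's: the host
 * values are stashed in the host context, errata and feature-dependent
 * bits are folded in, and for an NV guest the guest hypervisor's own
 * FGT settings are merged via compute_clr_set()/update_fgt_traps().
 */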
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
	u64 r_val, w_val;

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
	ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_clr |= tmp;
		w_clr |= tmp;
	}

	/* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD */
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_set |= HFGxTR_EL2_TCR_EL1_MASK;

	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
		compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
	}

	/* The default is to trap everything not handled or supported in KVM */
	tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
	      HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;

	r_val = __HFGRTR_EL2_nMASK & ~tmp;
	r_val |= r_set;
	r_val &= ~r_clr;

	w_val = __HFGWTR_EL2_nMASK & ~tmp;
	w_val |= w_set;
	w_val &= ~w_clr;

	write_sysreg_s(r_val, SYS_HFGRTR_EL2);
	write_sysreg_s(w_val, SYS_HFGWTR_EL2);

	if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
		return;

	update_fgt_traps(vcpu, HFGITR_EL2);
	update_fgt_traps(vcpu, HDFGRTR_EL2);
	update_fgt_traps(vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		update_fgt_traps(vcpu, HAFGRTR_EL2);
}

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);

	if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
		return;

	write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);

	if (cpu_has_amu())
		write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
}
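
/*
 * Program the common (mode-independent) trap state on guest entry.
 * HSTR_EL2 bit 15 traps AArch32 accesses to the IMPDEF cp15 c15 space.
 * PMU accesses from EL0 are made to trap to EL2, and PMSELR_EL0 is
 * sanitized so that it never contains the cycle counter (which could
 * make a PMXEVCNTR_EL0 access UNDEF at EL1 instead of trapping to EL2).
 * The guest's MDCR_EL2 is then installed, and HCRX_EL2/FGT state is
 * switched over.
 */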
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(1 << 15, hstr_el2);

	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = HCRX_GUEST_FLAGS;

		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
			u64 clr = 0, set = 0;

			compute_clr_set(vcpu, HCRX_EL2, clr, set);

			hcrx |= set;
			hcrx &= ~clr;
		}

		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}
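
/*
 * If we pended a virtual abort, preserve it until it gets cleared:
 * on taking a vSError interrupt, HCR_EL2.VSE is cleared to 0, so the
 * bit must be read back from the hardware register.
 */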
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}
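
/*
 * FEAT_MOPS memory-copy/set instructions can take an exception midway
 * through the sequence; reset the registers to the prologue state so
 * that the sequence is restarted from a well-defined point on re-entry.
 */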
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/* Finish potential single step before executing the prologue instruction */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}
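
/*
 * We trap the first access to FP/SIMD so that the host context is saved
 * and the guest context restored lazily. If FP/SIMD (or SVE for an SVE
 * guest) is not implemented, returning false leaves the exit to the
 * standard handling, which injects an UNDEF into the guest.
 */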
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		break;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap: first disable enough traps to allow register updates */
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;
		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	return true;
}
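
/*
 * Cavium ThunderX2 erratum 219 forces HCR_EL2.TVM to be set, trapping
 * all guest writes to the virtual-memory control registers. When KVM
 * itself did not ask for these traps, emulate the write here and skip
 * the instruction; otherwise fall through to the normal sysreg handling.
 */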
static bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/* The normal sysreg handling code expects to see the traps */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

static bool esr_is_ptrauth_trap(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while(0)
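
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

/*
 * Handle the guest's first use of pointer authentication: save the host
 * keys into the per-CPU hyp context, enable ptrauth for the vcpu, and
 * set HCR_EL2.{API,APK} so that further key and instruction use no
 * longer traps.
 */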
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/* We only get here for 64bit guests */
	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			if (is_hyp_ctxt(vcpu)) {
				ctxt = vcpu_hptimer(vcpu);
				break;
			}

			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	default:
		return false;
	}

	val = arch_timer_read_cntpct_el0();

	if (ctxt->offset.vm_offset)
		val -= *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		val -= *kern_hyp_va(ctxt->offset.vcpu_offset);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}
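
/*
 * Affected AmpereOne parts do not advertise FEAT_HAFDBS in
 * ID_AA64MMFR1_EL1, but the underlying TCR_EL1.{HA,HD} controls are
 * still functional. The architecture requires these bits to be RES0 in
 * that case, so mask them out of guest writes.
 */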
static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);

	return true;
}

static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	if (kvm_hyp_handle_cntpct(vcpu))
		return true;

	return false;
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);

static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		/* Only forward aborts the vgic-v2 emulation can decode */
		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}
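
/*
 * Exit handlers are indexed by ESR exception class. The per-mode handler
 * arrays and the early exit filter live in the VHE/nVHE specific code;
 * only their prototypes are visible here.
 */
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
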
static bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}
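
/*
 * Check for the conditions of Cortex-A510 erratum #2077057: when they
 * occur, SPSR_EL2 can't be trusted, but isn't needed either as it is
 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate (we are
 * single-stepping the guest and took a PAC exception from the
 * active-not-pending state).
 */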
static void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}
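
/*
 * Return true when we were able to fixup the guest exit and should
 * return to the guest, false when we should restore the host state and
 * return to the main run loop.
 */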
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Save PSTATE early so that the vcpu mode can be evaluated */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/* Let the mode-specific code repaint the exit state if needed */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to correct
		 * in order to return to after having injected the SError.
		 * SMC, on the other hand, is *trapped*, so its preferred
		 * return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/* Only process the trap if no SError is pending */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */