#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>
/*
 * __activate_traps() (excerpts): reload the directly-accessed physical
 * timer's CVAL (inside the has_cntpoff() block sketched below), then
 * configure CPACR_EL1 and VBAR_EL1 before entering the guest. The ZEN/FPEN
 * updates are alternative branches depending on guest FP ownership.
 */
                val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
                val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
                write_sysreg_el0(val, SYS_CNTP_CVAL);

        val = read_sysreg(cpacr_el1);
        val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
                 CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);

        if (vcpu_has_sve(vcpu))
                val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

        val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

        write_sysreg(val, cpacr_el1);

        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
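
/*
 * The CVAL excerpt above sits inside a has_cntpoff() block. A hedged sketch
 * of that framing, assuming the usual layout of __activate_traps(); the
 * timer_map/direct_ptimer names come from the symbol index below, while
 * vcpu_ptimer()/vcpu_hptimer() are arch-timer helpers assumed from context,
 * not shown in this extract.
 */
        if (has_cntpoff()) {
                struct timer_map map;

                get_timer_map(vcpu, &map);

                /*
                 * Entering the guest: reload the CVAL of whichever physical
                 * timer the guest accesses directly.
                 */
                if (map.direct_ptimer == vcpu_ptimer(vcpu))
                        val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
                if (map.direct_ptimer == vcpu_hptimer(vcpu))
                        val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);

                if (map.direct_ptimer) {
                        write_sysreg_el0(val, SYS_CNTP_CVAL);
                        isb();
                }
        }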
 
/*
 * __deactivate_traps() (excerpts): restore the host's HCR_EL2 view, save
 * the guest's physical timer CVAL and re-apply CNTPOFF_EL2 (the write-backs
 * are conditional on which timer is mapped directly; those conditions are
 * elided here), then reset the CPTR traps and restore the host vectors on
 * the way back to the host.
 */
        const char *host_vectors = vectors;

        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

                val = read_sysreg_el0(SYS_CNTP_CVAL);
                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
                __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
                offset = read_sysreg_s(SYS_CNTPOFF_EL2);
                write_sysreg_el0(val + offset, SYS_CNTP_CVAL);

        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

        kvm_reset_cptr_el2(vcpu);

        if (!arm64_kernel_unmapped_at_el0())
                host_vectors = __this_cpu_read(this_cpu_vector);
        write_sysreg(host_vectors, vbar_el1);
 
/*
 * __vcpu_load_activate_traps() / __vcpu_put_deactivate_traps() (excerpts):
 * the common trap (de)activation runs between each save/restore pair, with
 * IRQs disabled; see the sketch after this block.
 */
        local_irq_save(flags);
        local_irq_restore(flags);

        local_irq_save(flags);
        local_irq_restore(flags);
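
/*
 * A hedged sketch of the two wrappers these IRQ save/restore excerpts come
 * from, assuming they bracket the common helpers listed in the symbol index
 * below; a reconstruction, not necessarily verbatim.
 */
static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        local_irq_save(flags);
        __activate_traps_common(vcpu);
        local_irq_restore(flags);
}

static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        local_irq_save(flags);
        __deactivate_traps_common(vcpu);
        local_irq_restore(flags);
}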
 
/* kvm_vcpu_load_vhe() (excerpt): attach the guest's stage-2 MMU; see the sketch below. */
        __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
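
/*
 * For context, a hedged sketch of how the vcpu load/put paths combine the
 * sysreg switch, trap handling and stage-2 load around the excerpt above,
 * built from the helpers in the symbol index below; not necessarily verbatim.
 */
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
{
        __vcpu_load_switch_sysregs(vcpu);
        __vcpu_load_activate_traps(vcpu);
        __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
}

void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
{
        __vcpu_put_deactivate_traps(vcpu);
        __vcpu_put_switch_sysregs(vcpu);
}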
 
/* hyp_exit_handlers[] (excerpt): default every ESR exception class to NULL; the full table is sketched below. */
        [0 ... ESR_ELx_EC_MAX]          = NULL,
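
/*
 * A hedged sketch of how the handler table is populated and looked up. The
 * handler names are taken from the symbol index below; the exact set of
 * ESR_ELx_EC_* entries depends on the kernel version.
 */
static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
        [ESR_ELx_EC_SYS64]              = kvm_hyp_handle_sysreg,
        [ESR_ELx_EC_SVE]                = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
        [ESR_ELx_EC_WATCHPT_LOW]        = kvm_hyp_handle_watchpt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
        [ESR_ELx_EC_MOPS]               = kvm_hyp_handle_mops,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
        return hyp_exit_handlers;
}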
 
/*
 * Early exit filtering (excerpts): if the vCPU entered in vEL2 (hyp)
 * context, rewrite the saved PSTATE mode from its EL1 encoding to the
 * matching EL2 mode so the usual exit helpers see the right view; the
 * surrounding switch is sketched after this block.
 */
        if (unlikely(vcpu_get_flag(vcpu, VCPU_HYP_CONTEXT))) {
                u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

                        mode = PSR_MODE_EL2t;
                        mode = PSR_MODE_EL2h;

                *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
                *vcpu_cpsr(vcpu) |= mode;
        }
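
/*
 * The two mode assignments above are the arms of a switch that remaps the
 * EL1 encoding of the saved mode to its EL2 counterpart. A sketch of that
 * switch (a hedged reconstruction, not necessarily verbatim):
 */
                switch (mode) {
                case PSR_MODE_EL1t:
                        mode = PSR_MODE_EL2t;
                        break;
                case PSR_MODE_EL1h:
                        mode = PSR_MODE_EL2h;
                        break;
                }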
 
/*
 * __kvm_vcpu_run_vhe() (excerpts): set up the host/guest contexts, record
 * whether the vCPU enters in vEL2 (hyp) context, run the guest, and save
 * the 32-bit FPEXC state when the guest owns the FP registers; the full
 * enter/exit sequence is sketched after this block.
 */
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        if (is_hyp_ctxt(vcpu))
                vcpu_set_flag(vcpu, VCPU_HYP_CONTEXT);
        else
                vcpu_clear_flag(vcpu, VCPU_HYP_CONTEXT);

        exit_code = __guest_enter(vcpu);

        if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
                __fpsimd_save_fpexc32(vcpu);
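
/*
 * A hedged sketch of the enter/exit sequence these excerpts belong to,
 * using the save/restore, trap and fixup helpers from the symbol index
 * below; the ordering is a reconstruction, not necessarily verbatim.
 */
        sysreg_save_host_state_vhe(host_ctxt);

        __activate_traps(vcpu);
        __kvm_adjust_pc(vcpu);

        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        do {
                /* Enter the guest; returns with an exit code once it traps. */
                exit_code = __guest_enter(vcpu);
        } while (fixup_guest_exit(vcpu, &exit_code));

        sysreg_save_guest_state_vhe(guest_ctxt);
        __deactivate_traps(vcpu);
        sysreg_restore_host_state_vhe(host_ctxt);

        __debug_switch_to_host(vcpu);

        return exit_code;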
 
/* __kvm_vcpu_run() (excerpt): restore PSTATE.DAIF after the world switch; see the wrapper sketch below. */
        local_daif_restore(DAIF_PROCCTX_NOIRQ);
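
/*
 * A hedged sketch of the __kvm_vcpu_run() wrapper around the VHE world
 * switch, showing where the DAIF restore above fits; a reconstruction,
 * details (e.g. GIC PMR handling) vary by kernel version.
 */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Mask DAIF exceptions for the duration of the world switch. */
        local_daif_mask();

        ret = __kvm_vcpu_run_vhe(vcpu);

        /* Restore PSTATE.DAIF (and the GIC PMR when IRQ priorities are used). */
        local_daif_restore(DAIF_PROCCTX_NOIRQ);

        /* Make trap/sysreg reconfiguration visible before running host code. */
        isb();

        return ret;
}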
 
/*
 * __hyp_call_panic() (excerpts): recover the vCPU that was running on this
 * CPU from the host context and report the hypervisor panic.
 */
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;

        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;

        panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
              spsr, elr,
              read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
              read_sysreg(hpfar_el2), par, vcpu);
 
/* hyp_panic() (excerpts): snapshot the fault context before panicking; see the sketch below. */
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg_par();
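
/*
 * A hedged sketch of how the panic path fits together: __hyp_call_panic()
 * puts EL2 back into a host-usable state before the panic() call excerpted
 * above, and hyp_panic() gathers the fault context and never returns.
 * A reconstruction from the symbols listed below, not necessarily verbatim.
 */
        /* In __hyp_call_panic(), ahead of the panic() call shown above: */
        __deactivate_traps(vcpu);
        sysreg_restore_host_state_vhe(host_ctxt);

asmlinkage void __noreturn hyp_panic(void)
{
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg_par();

        __hyp_call_panic(spsr, elr, par);
        unreachable();
}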
 
/*
 * Symbol index (as extracted): signatures of the functions and objects
 * defined in or referenced by the code excerpted above.
 */
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
static bool has_cntpoff(void);
void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
asmlinkage void kvm_unexpected_el2_exception(void);
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
asmlinkage void __noreturn hyp_panic(void);
int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
struct arch_timer_context *direct_ptimer;
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) __alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code);
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code);
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code);
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static void __deactivate_traps_common(struct kvm_vcpu *vcpu);
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code);
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code);
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
static void __activate_traps_common(struct kvm_vcpu *vcpu);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) __alias(kvm_hyp_handle_memory_fault);
static void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu);
static bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code);
static void ___deactivate_traps(struct kvm_vcpu *vcpu);
static void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu);
static void ___activate_traps(struct kvm_vcpu *vcpu);
static void __kvm_unexpected_el2_exception(void);
static bool guest_owns_fp_regs(struct kvm_vcpu *vcpu);
static void __activate_traps(struct kvm_vcpu *vcpu);
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par);
static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu);
static const exit_handler_fn hyp_exit_handlers[];
NOKPROBE_SYMBOL(__activate_traps);
static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
static void __deactivate_traps(struct kvm_vcpu *vcpu);
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);