/*
 * NOTE(review): non-contiguous excerpt of a single hyp world-switch
 * function (the nVHE __kvm_vcpu_run path, judging by the helpers used).
 * The embedded numbers (250, 251, ...) are original source line numbers
 * left by the extraction, not code. The function header, the exit_code
 * declaration, closing braces and most of the body are NOT visible here
 * -- confirm everything against the full source file.
 */
250 struct kvm_cpu_context *host_ctxt;
251 struct kvm_cpu_context *guest_ctxt;
252 struct kvm_s2_mmu *mmu;
253 bool pmu_switch_needed;
/*
 * When pseudo-NMI IRQ priority masking is in use, open the PMR but keep
 * the PSR.I override set, so interrupt state is controlled via PMR here.
 */
262 if (system_uses_irq_prio_masking()) {
263 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
/* Publish the about-to-run vcpu in this CPU's host context. */
267 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
268 host_ctxt->__hyp_running_vcpu = vcpu;
269 guest_ctxt = &vcpu->arch.ctxt;
/*
 * Convert the kernel VA of the stage-2 MMU to a hyp VA, then load the
 * guest's stage-2 translation regime.
 */
307 mmu = kern_hyp_va(vcpu->arch.hw_mmu);
308 __load_stage2(mmu, kern_hyp_va(mmu->arch));
/* Enter the guest; returns only when the guest exits back to hyp. */
318 exit_code = __guest_enter(vcpu);
/*
 * NOTE(review): guarded action not visible -- presumably saves guest
 * FP/SIMD state only when the guest owns the FP registers; confirm.
 */
340 if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
/*
 * NOTE(review): body not visible -- presumably undoes the PMU event
 * switch performed on entry (pmu_switch_needed set earlier); confirm.
 */
350 if (pmu_switch_needed)
/* Re-mask IRQs at the PMR before handing control back to the host. */
354 if (system_uses_irq_prio_masking())
355 gic_write_pmr(GIC_PRIO_IRQOFF);
/* Clear the running-vcpu marker now that the guest run is over. */
357 host_ctxt->__hyp_running_vcpu = NULL;
/*
 * NOTE(review): signature-only residue -- these are function/macro
 * definition headers with their bodies stripped by the extraction.
 * The names match the arm64 KVM hyp world-switch helper set (sysreg,
 * debug, timer, vgic and PMU save/restore plus trap (de)activation),
 * but no body is visible here, so no behavior is documented. They are
 * not valid standalone declarations (no semicolons); restore from the
 * full source file rather than editing here.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
static __always_inline void __load_host_stage2(void)
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
static void __activate_traps(struct kvm_vcpu *vcpu)
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
#define __pmu_switch_to_host(v)
#define __pmu_switch_to_guest(v)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
void __timer_disable_traps(struct kvm_vcpu *vcpu)
void __timer_enable_traps(struct kvm_vcpu *vcpu)
static bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
static void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
static void __sysreg32_save_state(struct kvm_vcpu *vcpu)