#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
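/*
 * Host context saved by __tlb_switch_to_guest() and restored by
 * __tlb_switch_to_host(): the stage-2 MMU that was live before the
 * switch (if any), the caller's IRQ flags, and the TCR_EL1/SCTLR_EL1
 * values stashed when the ARM64_WORKAROUND_SPECULATIVE_AT workaround
 * is in effect.
 */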
struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;
	unsigned long		flags;
	u64			tcr;
	u64			sctlr;
};
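/*
 * Under VHE the host runs with HCR_EL2.{E2H,TGE} = {1,1}, so EL1 TLB
 * maintenance issued from EL2 targets the host's EL2&0 regime. To make
 * the invalidations below hit the guest's EL1&0 regime, we load the
 * guest's stage-2 context and clear TGE for the duration of the
 * operation. On CPUs affected by ARM64_WORKAROUND_SPECULATIVE_AT, the
 * stage-1 walker must also be prevented from allocating new TLB entries
 * while the context is only partially switched; this is done by setting
 * the TCR_EL1.EPD{0,1} bits while keeping the stage-1 MMU enabled.
 */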
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	u64 val;

	local_irq_save(cxt->flags);

	if (vcpu && mmu != vcpu->arch.hw_mmu)
		cxt->mmu = vcpu->arch.hw_mmu;
	else
		cxt->mmu = NULL;

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * Stop the EL1 page-table walker from allocating new TLB
		 * entries: set the EPD bits in TCR_EL1 and keep the
		 * stage-1 MMU enabled.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/* Load the guest stage-2 and clear TGE so TLBIs hit the guest regime */
	__load_stage2(mmu, mmu->arch);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/* Restore the host's view of HCR_EL2 ... */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	/* ... and the stage-2 MMU context we switched away from, if any */
	if (cxt->mmu)
		__load_stage2(cxt->mmu, cxt->mmu->arch);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore TCR_EL1 and SCTLR_EL1 to their saved values */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}
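/*
 * The per-VMID flush helpers below broadly share a common shape: order
 * any prior page-table updates with a DSB, switch to the target VMID's
 * stage-2 context, issue the TLB invalidation, wait for completion
 * (DSB + ISB), and switch back to the host context.
 */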
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);
	__tlb_switch_to_guest(mmu, &cxt);

	/* Invalidate stage-2 for this IPA, then the whole of stage-1 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(nshst);
	__tlb_switch_to_guest(mmu, &cxt);

	/* Non-shareable variant: only this CPU's TLB needs invalidating */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/* The range may span mappings at several levels; assume PAGE_SIZE */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	dsb(ishst);
	__tlb_switch_to_guest(mmu, &cxt);

	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
				TLBI_TTL_UNKNOWN);
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	__tlb_switch_to_guest(mmu, &cxt);

	/* Local invalidation of the TLBs and the I-cache for this CPU */
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}
void __kvm_flush_vm_context(void)
{
	/* Invalidate all stage-1 and stage-2 TLB entries, for all VMIDs */
	dsb(ishst);
	__tlbi(alle1is);
	dsb(ish);
}
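/*
 * Usage sketch (illustrative only; exact call sites vary across kernel
 * versions): the rest of KVM reaches these helpers through the
 * kvm_call_hyp() wrapper, which with VHE resolves to a direct function
 * call, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 */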