#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
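
/*
 * Minimal sketch of the invalidation context used via cxt-> below, assuming
 * (as in the nVHE hyp code) that only the host's TCR_EL1 needs to be
 * preserved across the VMID switch.
 */
struct tlb_inv_context {
	u64	tcr;
};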

/* In __tlb_switch_to_guest(): ARM64_WORKAROUND_SPECULATIVE_AT handling */
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
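	u64 val;

	/*
	 * Erratum workaround: stop any further host Stage-1 walks while the
	 * guest's VMID is live by setting the TCR_EL1 EPD0/EPD1 bits; the
	 * original TCR_EL1 value is stashed in the context for restoration.
	 */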
	val = cxt->tcr = read_sysreg_el1(SYS_TCR);
	val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
	write_sysreg_el1(val, SYS_TCR);
	isb();
}
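
/*
 * __load_stage2() only contains an ISB when the SPECULATIVE_AT workaround
 * is applied, so the ALTERNATIVE below provides one for every other CPU
 * without ever issuing two ISBs back to back.
 */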
__load_stage2(mmu, kern_hyp_va(mmu->arch));
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
	/* Ensure the write of the host VMID has taken effect */
	isb();
	/* Restore the host's TCR_EL1, re-enabling Stage-1 walks */
	write_sysreg_el1(cxt->tcr, SYS_TCR);
}
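
/*
 * __kvm_tlb_flush_vmid_ipa(): invalidate the Stage-2 TLB entries for a
 * single IPA, broadcast to the Inner Shareable domain (ipas2e1is) and
 * scoped to the given page-table level where the CPU supports TTL hints.
 */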
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	/* ... */
	__tlbi_level(ipas2e1is, ipa, level);
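
/*
 * __kvm_tlb_flush_vmid_ipa_nsh(): same invalidation, but non-shareable
 * (ipas2e1), so only the TLB of the CPU issuing it is affected.
 */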
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	/* ... */
	__tlbi_level(ipas2e1, ipa, level);
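
/*
 * __kvm_tlb_flush_vmid_range(): Stage-2 invalidation over [start, start +
 * pages * PAGE_SIZE), using the range TLBI instructions when the CPU
 * supports them (via __flush_s2_tlb_range_op()).
 */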
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	unsigned long stride;

	/* The range may not be mapped at a single level, so assume PAGE_SIZE */
	stride = PAGE_SIZE;
	start = round_down(start, stride);
	/* the trailing TTL-hint argument was truncated here; 0 (no hint) assumed */
	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
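
/*
 * __kvm_tlb_flush_vmid(): drop every Stage-1 and Stage-2 entry tagged with
 * the current VMID, broadcast Inner Shareable.
 */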
__tlbi(vmalls12e1is);
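
/*
 * __kvm_flush_cpu_context(): in addition to the TLB invalidation, the local
 * instruction cache is invalidated to the point of unification.
 */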
asm volatile("ic iallu");

/* Entry points involved in this TLB maintenance code: */
static __always_inline void __load_host_stage2(void);
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, struct tlb_inv_context *cxt, bool nsh);
static void __tlb_switch_to_host(struct tlb_inv_context *cxt);
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level);
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level);
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, phys_addr_t start, unsigned long pages);
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
void __kvm_flush_vm_context(void);
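
/*
 * A hedged sketch of the shape shared by the flush entry points above,
 * assembled from the fragments in this excerpt: enter the guest's VMID,
 * issue the TLBI, wait for completion, then restore the host context.
 * The dsb(ish)/isb() barrier choice is an assumption for the Inner
 * Shareable variants; the non-shareable paths would use dsb(nsh).
 */
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to the requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/* Invalidate all Stage-1 and Stage-2 entries for this VMID */
	__tlbi(vmalls12e1is);
	/* Make sure the broadcast invalidation has completed */
	dsb(ish);
	isb();

	/* Back to the host's translation regime */
	__tlb_switch_to_host(&cxt);
}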