KVM
tlb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

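/*
 * Context saved across a TLB invalidation: the stage-2 MMU context to
 * reload on the way back to the host (if any), the caller's interrupt
 * flags, and the TCR_EL1/SCTLR_EL1 values stashed for the
 * ARM64_WORKAROUND_SPECULATIVE_AT workaround.
 */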
struct tlb_inv_context {
        struct kvm_s2_mmu *mmu;
        unsigned long flags;
        u64 tcr;
        u64 sctlr;
};

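/*
 * Switch the EL1/EL0 translation regime over to the guest's stage-2
 * context, so that the TLB maintenance issued by the caller targets the
 * guest's VMID rather than the host. Interrupts stay disabled until
 * __tlb_switch_to_host() undoes all of this.
 */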
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                  struct tlb_inv_context *cxt)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        u64 val;

        local_irq_save(cxt->flags);

        if (vcpu && mmu != vcpu->arch.hw_mmu)
                cxt->mmu = vcpu->arch.hw_mmu;
        else
                cxt->mmu = NULL;

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /*
                 * For CPUs that are affected by ARM errata 1165522 or 1530923,
                 * we cannot trust stage-1 to be in a correct state at that
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLB entries. This is done by setting the
                 * EPD bits in the TCR_EL1 register. We also need to prevent
                 * it from allocating IPA->PA walks, so we enable the S1 MMU...
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                val |= SCTLR_ELx_M;
                write_sysreg_el1(val, SYS_SCTLR);
        }

        /*
         * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
         * most TLB operations target EL2/EL0. In order to affect the
         * guest TLBs (EL1/EL0), we need to change one of these two
         * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
         * let's flip TGE before executing the TLB operation.
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
         * place before clearing TGE. __load_stage2() already
         * has an ISB in order to deal with this.
         */
        __load_stage2(mmu, mmu->arch);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
        isb();
}

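/*
 * Undo __tlb_switch_to_guest(): restore the host's view of HCR_EL2,
 * reload the stage-2 context we switched away from (if any), revert the
 * SPECULATIVE_AT register changes and re-enable interrupts.
 */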
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
        /*
         * We're done with the TLB operation, let's restore the host's
         * view of HCR_EL2.
         */
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();

        /* ... and the stage-2 MMU context that we switched away from */
        if (cxt->mmu)
                __load_stage2(cxt->mmu, cxt->mmu->arch);

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
        }

        local_irq_restore(cxt->flags);
}

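/*
 * Invalidate the stage-2 TLB entries covering @ipa (with @level used as
 * a TTL hint) for @mmu's VMID, broadcast to the Inner Shareable domain.
 */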
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
                              phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
        ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

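/*
 * Same as __kvm_tlb_flush_vmid_ipa(), but non-shareable: only the TLB
 * of the local CPU is invalidated.
 */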
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
                                  phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

        dsb(nshst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
        ipa >>= 12;
        __tlbi_level(ipas2e1, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(nsh);
        __tlbi(vmalle1);
        dsb(nsh);
        isb();

        __tlb_switch_to_host(&cxt);
}

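/*
 * Invalidate the stage-2 TLB entries covering @pages pages starting at
 * @start under @mmu's VMID, then the whole of stage-1, as above.
 */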
void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                phys_addr_t start, unsigned long pages)
{
        struct tlb_inv_context cxt;
        unsigned long stride;

        /*
         * Since the range of addresses may not be mapped at
         * the same level, assume the worst case as PAGE_SIZE
         */
        stride = PAGE_SIZE;
        start = round_down(start, stride);

        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
                                TLBI_TTL_UNKNOWN);

        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

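/*
 * Invalidate all stage-1 and stage-2 TLB entries for @mmu's VMID
 * (TLBI VMALLS12E1IS), broadcast to the Inner Shareable domain.
 */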
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

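/*
 * Flush this CPU's view of the guest: invalidate the local EL1 TLB
 * entries for @mmu's VMID and the entire local instruction cache. All
 * operations are non-shareable, so other CPUs are unaffected.
 */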
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        __tlb_switch_to_host(&cxt);
}

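/*
 * Invalidate the EL1&0 TLB entries of all VMIDs (TLBI ALLE1IS),
 * broadcast to the Inner Shareable domain, typically when the VMID
 * space is recycled.
 */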
void __kvm_flush_vm_context(void)
{
        dsb(ishst);
        __tlbi(alle1is);
        dsb(ish);
}