KVM
tlb.c File Reference
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
#include <nvhe/mem_protect.h>


Classes

struct  tlb_inv_context
 

Functions

static void __tlb_switch_to_guest (struct kvm_s2_mmu *mmu, struct tlb_inv_context *cxt, bool nsh)
 
static void __tlb_switch_to_host (struct tlb_inv_context *cxt)
 
void __kvm_tlb_flush_vmid_ipa (struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)
 
void __kvm_tlb_flush_vmid_ipa_nsh (struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)
 
void __kvm_tlb_flush_vmid_range (struct kvm_s2_mmu *mmu, phys_addr_t start, unsigned long pages)
 
void __kvm_tlb_flush_vmid (struct kvm_s2_mmu *mmu)
 
void __kvm_flush_cpu_context (struct kvm_s2_mmu *mmu)
 
void __kvm_flush_vm_context (void)
 

Function Documentation

◆ __kvm_flush_cpu_context()

void __kvm_flush_cpu_context (struct kvm_s2_mmu *mmu)

Definition at line 182 of file tlb.c.

183 {
184  struct tlb_inv_context cxt;
185 
186  /* Switch to requested VMID */
187  __tlb_switch_to_guest(mmu, &cxt, false);
188 
189  __tlbi(vmalle1);
190  asm volatile("ic iallu");
191  dsb(nsh);
192  isb();
193 
194  __tlb_switch_to_host(&cxt);
195 }
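
All of the __kvm_* functions documented on this page are EL2 entry points; under nVHE the host typically reaches them through the kvm_call_hyp() hypercall helper rather than by a direct call. A minimal, hedged sketch of a host-side wrapper for this local flush (the wrapper name is hypothetical and not part of tlb.c):

/*
 * Hypothetical host-side helper: flush the guest's cached translations
 * and the instruction cache on the current physical CPU only, e.g.
 * before the guest runs on a CPU it has not used recently.
 * kvm_call_hyp() issues the HVC that lands in the EL2 function above.
 */
static void flush_guest_context_on_this_cpu(struct kvm_s2_mmu *mmu)
{
	kvm_call_hyp(__kvm_flush_cpu_context, mmu);
}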

◆ __kvm_flush_vm_context()

void __kvm_flush_vm_context (void)

Definition at line 197 of file tlb.c.

198 {
199  /* Same remark as in __tlb_switch_to_guest() */
200  dsb(ish);
201  __tlbi(alle1is);
202  dsb(ish);
203 }

◆ __kvm_tlb_flush_vmid()

void __kvm_tlb_flush_vmid (struct kvm_s2_mmu *mmu)

Definition at line 168 of file tlb.c.

169 {
170  struct tlb_inv_context cxt;
171 
172  /* Switch to requested VMID */
173  __tlb_switch_to_guest(mmu, &cxt, false);
174 
175  __tlbi(vmalls12e1is);
176  dsb(ish);
177  isb();
178 
179  __tlb_switch_to_host(&cxt);
180 }

◆ __kvm_tlb_flush_vmid_ipa()

void __kvm_tlb_flush_vmid_ipa (struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)

Definition at line 81 of file tlb.c.

83 {
84  struct tlb_inv_context cxt;
85 
86  /* Switch to requested VMID */
87  __tlb_switch_to_guest(mmu, &cxt, false);
88 
89  /*
90  * We could do so much better if we had the VA as well.
91  * Instead, we invalidate Stage-2 for this IPA, and the
92  * whole of Stage-1. Weep...
93  */
94  ipa >>= 12;
95  __tlbi_level(ipas2e1is, ipa, level);
96 
97  /*
98  * We have to ensure completion of the invalidation at Stage-2,
99  * since a table walk on another CPU could refill a TLB with a
100  * complete (S1 + S2) walk based on the old Stage-2 mapping if
101  * the Stage-1 invalidation happened first.
102  */
103  dsb(ish);
104  __tlbi(vmalle1is);
105  dsb(ish);
106  isb();
107 
108  __tlb_switch_to_host(&cxt);
109 }
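
The ipa >>= 12 step converts the address into the page-number form that the TLBI IPAS2E1IS operand expects, and level lets __tlbi_level() add a TTL hint when the leaf level is known. A hedged sketch of how a caller might supply those arguments (illustrative wrapper only; the real callers live in the host's stage-2 management code):

/*
 * Illustrative only: invalidate the stage-2 mapping of a single guest
 * page. 'level' is the page-table level of the leaf entry when the
 * caller knows it, or TLBI_TTL_UNKNOWN when it does not.
 */
static void flush_one_guest_page(struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
}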

◆ __kvm_tlb_flush_vmid_ipa_nsh()

void __kvm_tlb_flush_vmid_ipa_nsh (struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)

Definition at line 111 of file tlb.c.

113 {
114  struct tlb_inv_context cxt;
115 
116  /* Switch to requested VMID */
117  __tlb_switch_to_guest(mmu, &cxt, true);
118 
119  /*
120  * We could do so much better if we had the VA as well.
121  * Instead, we invalidate Stage-2 for this IPA, and the
122  * whole of Stage-1. Weep...
123  */
124  ipa >>= 12;
125  __tlbi_level(ipas2e1, ipa, level);
126 
127  /*
128  * We have to ensure completion of the invalidation at Stage-2,
129  * since a table walk on another CPU could refill a TLB with a
130  * complete (S1 + S2) walk based on the old Stage-2 mapping if
131  * the Stage-1 invalidation happened first.
132  */
133  dsb(nsh);
134  __tlbi(vmalle1);
135  dsb(nsh);
136  isb();
137 
138  __tlb_switch_to_host(&cxt);
139 }
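
The only difference from __kvm_tlb_flush_vmid_ipa() is scope: the non-shareable barriers and non-broadcast TLBI instructions confine the invalidation to the executing CPU. A hedged sketch of choosing between the two variants (hypothetical helper; real callers pick one variant statically):

/*
 * Illustrative only: the local (nsh) variant is enough when only the
 * CPU performing the update must observe the invalidation before
 * re-entering the guest; any cross-CPU visibility requirement needs
 * the broadcast (Inner Shareable) variant.
 */
static void flush_guest_page(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
			     int level, bool local_only)
{
	if (local_only)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, mmu, ipa, level);
	else
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
}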

◆ __kvm_tlb_flush_vmid_range()

void __kvm_tlb_flush_vmid_range (struct kvm_s2_mmu *mmu, phys_addr_t start, unsigned long pages)

Definition at line 141 of file tlb.c.

143 {
144  struct tlb_inv_context cxt;
145  unsigned long stride;
146 
147  /*
148  * Since the range of addresses may not be mapped at
149  * the same level, assume the worst case as PAGE_SIZE
150  */
151  stride = PAGE_SIZE;
152  start = round_down(start, stride);
153 
154  /* Switch to requested VMID */
155  __tlb_switch_to_guest(mmu, &cxt, false);
156 
157  __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
158  TLBI_TTL_UNKNOWN);
159 
160  dsb(ish);
161  __tlbi(vmalle1is);
162  dsb(ish);
163  isb();
164 
165  __tlb_switch_to_host(&cxt);
166 }
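
The pages argument counts PAGE_SIZE units starting at start (which the function rounds down itself before issuing the range operation). A hedged sketch of deriving pages from a byte-sized range (illustrative helper; it omits the chunking that real callers may apply to very large ranges):

/*
 * Illustrative only: flush the stage-2 TLB entries covering the IPA
 * range [start, start + size), widened to whole pages.
 */
static void flush_guest_ipa_range(struct kvm_s2_mmu *mmu, phys_addr_t start, size_t size)
{
	unsigned long pages = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, start, pages);
}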

◆ __tlb_switch_to_guest()

static void __tlb_switch_to_guest (struct kvm_s2_mmu *mmu, struct tlb_inv_context *cxt, bool nsh)

Definition at line 17 of file tlb.c.

20 {
21  /*
22  * We have two requirements:
23  *
24  * - ensure that the page table updates are visible to all
25  * CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
26  * being either ish or nsh, depending on the invalidation
27  * type.
28  *
29  * - complete any speculative page table walk started before
30  * we trapped to EL2 so that we can mess with the MM
31  * registers out of context, for which dsb(nsh) is enough
32  *
33  * The composition of these two barriers is a dsb(DOMAIN), and
34  * the 'nsh' parameter tracks the distinction between
35  * Inner-Shareable and Non-Shareable, as specified by the
36  * callers.
37  */
38  if (nsh)
39  dsb(nsh);
40  else
41  dsb(ish);
42 
43  if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
44  u64 val;
45 
46  /*
47  * For CPUs that are affected by ARM 1319367, we need to
48  * avoid a host Stage-1 walk while we have the guest's
49  * VMID set in the VTTBR in order to invalidate TLBs.
50  * We're guaranteed that the S1 MMU is enabled, so we can
51  * simply set the EPD bits to avoid any further TLB fill.
52  */
53  val = cxt->tcr = read_sysreg_el1(SYS_TCR);
54  val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
55  write_sysreg_el1(val, SYS_TCR);
56  isb();
57  }
58 
59  /*
60  * __load_stage2() includes an ISB only when the AT
61  * workaround is applied. Take care of the opposite condition,
62  * ensuring that we always have an ISB, but not two ISBs back
63  * to back.
64  */
65  __load_stage2(mmu, kern_hyp_va(mmu->arch));
66  asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
67 }

◆ __tlb_switch_to_host()

static void __tlb_switch_to_host (struct tlb_inv_context *cxt)

Definition at line 69 of file tlb.c.

70 {
71  __load_host_stage2();
72 
73  if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
74  /* Ensure write of the host VMID */
75  isb();
76  /* Restore the host's TCR_EL1 */
77  write_sysreg_el1(cxt->tcr, SYS_TCR);
78  }
79 }