KVM
setup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

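/*
 * Carve the page-aligned pool donated by the host into the fixed-purpose
 * regions the hypervisor needs: the hyp vmemmap, the VM table, the hyp
 * stage-1 page-tables, the host stage-2 page-tables and the FF-A proxy
 * buffers. All allocations come from the early (bump) allocator.
 */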
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

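/*
 * Re-create the hypervisor's stage-1 mappings (text, rodata, bss, the
 * donated memory pool, per-CPU areas and stacks) in a page-table owned
 * by the hypervisor itself, using the early page allocator seeded with
 * the pages reserved in divide_memory_pool().
 */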
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	return 0;
}

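/*
 * Publish the new hyp PGD in each CPU's init params and clean the
 * structures to the PoC, as they may be read with caching disabled
 * (e.g. by CPUs entering hyp with the MMU off).
 */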
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

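/* Glue between the 'hpool' buddy allocator and struct kvm_pgtable_mm_ops. */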
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

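/*
 * Walker callback: for each valid hyp stage-1 leaf mapping of a memory
 * page, derive the pKVM ownership state and mirror it in the host stage-2.
 */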
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

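/*
 * Walk the hyp stage-1 mappings of every memblock region and propagate
 * their ownership state into the host stage-2 (see the walker above).
 */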
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

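/*
 * Walk the entire hyp VA space so that every valid PTE, at any level,
 * contributes a reference to the page-table page that holds it.
 */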
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

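/*
 * Second stage of initialisation, reached once the hypervisor runs on its
 * own page-tables: install the buddy allocator, prepare the host stage-2,
 * take ownership of the hypervisor's sections, then bring up the per-CPU
 * fixmap, the FF-A proxy and the VM table.
 */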
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

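/*
 * Entry point of the __pkvm_init hypercall: carve up the memory pool
 * donated by the host, rebuild the hyp stage-1 mappings, then jump through
 * the idmap page to switch onto the new page-tables and finalise in
 * __pkvm_init_finalise(). Only returns on error; on success the tail-call
 * chain re-enters the host via __host_enter().
 */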
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}
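
/*
 * For context, a sketch of the host-side caller (not part of this file;
 * based on do_pkvm_init() in arch/arm64/kvm/arm.c, whose exact shape may
 * differ between kernel versions):
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
 *				num_possible_cpus(), kern_hyp_va(per_cpu_base),
 *				hyp_va_bits);
 */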