mmu.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5  */
6 
7 #include <linux/mman.h>
8 #include <linux/kvm_host.h>
9 #include <linux/io.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sched/signal.h>
12 #include <trace/events/kvm.h>
13 #include <asm/pgalloc.h>
14 #include <asm/cacheflush.h>
15 #include <asm/kvm_arm.h>
16 #include <asm/kvm_mmu.h>
17 #include <asm/kvm_pgtable.h>
18 #include <asm/kvm_ras.h>
19 #include <asm/kvm_asm.h>
20 #include <asm/kvm_emulate.h>
21 #include <asm/virt.h>
22 
23 #include "trace.h"
24 
25 static struct kvm_pgtable *hyp_pgtable;
26 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
27 
28 static unsigned long __ro_after_init hyp_idmap_start;
29 static unsigned long __ro_after_init hyp_idmap_end;
30 static phys_addr_t __ro_after_init hyp_idmap_vector;
31 
32 static unsigned long __ro_after_init io_map_base;
33 
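/*
 * Return the next @size-aligned boundary above @addr, capped at @end, so a
 * stage-2 walk can be carved into block-sized chunks. The open-coded min()
 * keeps the comparison correct if @end wraps to 0.
 */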
34 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
35  phys_addr_t size)
36 {
37  phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
38 
39  return (boundary - 1 < end - 1) ? boundary : end;
40 }
41 
42 static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
43 {
44  phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
45 
46  return __stage2_range_addr_end(addr, end, size);
47 }
48 
49 /*
50  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
51  * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
52  * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
53  * long will also starve other vCPUs. We also have to make sure that the page
54  * tables are not freed while we release the lock.
55  */
56 static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
57  phys_addr_t end,
58  int (*fn)(struct kvm_pgtable *, u64, u64),
59  bool resched)
60 {
61  struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
62  int ret;
63  u64 next;
64 
65  do {
66  struct kvm_pgtable *pgt = mmu->pgt;
67  if (!pgt)
68  return -EINVAL;
69 
70  next = stage2_range_addr_end(addr, end);
71  ret = fn(pgt, addr, next - addr);
72  if (ret)
73  break;
74 
75  if (resched && next != end)
76  cond_resched_rwlock_write(&kvm->mmu_lock);
77  } while (addr = next, addr != end);
78 
79  return ret;
80 }
81 
82 #define stage2_apply_range_resched(mmu, addr, end, fn) \
83  stage2_apply_range(mmu, addr, end, fn, true)
84 
85 /*
86  * Get the maximum number of page-tables pages needed to split a range
87  * of blocks into PAGE_SIZE PTEs. It assumes the range is already
88  * mapped at level 2, or at level 1 if allowed.
89  */
90 static int kvm_mmu_split_nr_page_tables(u64 range)
91 {
92  int n = 0;
93 
94  if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
95  n += DIV_ROUND_UP(range, PUD_SIZE);
96  n += DIV_ROUND_UP(range, PMD_SIZE);
97  return n;
98 }
99 
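/*
 * True when the eager-split loop should drop mmu_lock: another task needs
 * the CPU or the lock, or the split cache no longer holds enough pages to
 * split one more chunk.
 */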
100 static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
101 {
102  struct kvm_mmu_memory_cache *cache;
103  u64 chunk_size, min;
104 
105  if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
106  return true;
107 
108  chunk_size = kvm->arch.mmu.split_page_chunk_size;
109  min = kvm_mmu_split_nr_page_tables(chunk_size);
110  cache = &kvm->arch.mmu.split_page_cache;
111  return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
112 }
113 
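/*
 * Split the block mappings in [addr, end) down to PAGE_SIZE PTEs, one
 * split_page_chunk_size chunk at a time, dropping mmu_lock to refill the
 * split cache whenever it runs low. Called with mmu_lock held for write.
 */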
114 static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
115  phys_addr_t end)
116 {
117  struct kvm_mmu_memory_cache *cache;
118  struct kvm_pgtable *pgt;
119  int ret, cache_capacity;
120  u64 next, chunk_size;
121 
122  lockdep_assert_held_write(&kvm->mmu_lock);
123 
124  chunk_size = kvm->arch.mmu.split_page_chunk_size;
125  cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);
126 
127  if (chunk_size == 0)
128  return 0;
129 
130  cache = &kvm->arch.mmu.split_page_cache;
131 
132  do {
133  if (need_split_memcache_topup_or_resched(kvm)) {
134  write_unlock(&kvm->mmu_lock);
135  cond_resched();
136  /* Eager page splitting is best-effort. */
137  ret = __kvm_mmu_topup_memory_cache(cache,
138  cache_capacity,
139  cache_capacity);
140  write_lock(&kvm->mmu_lock);
141  if (ret)
142  break;
143  }
144 
145  pgt = kvm->arch.mmu.pgt;
146  if (!pgt)
147  return -EINVAL;
148 
149  next = __stage2_range_addr_end(addr, end, chunk_size);
150  ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
151  if (ret)
152  break;
153  } while (addr = next, addr != end);
154 
155  return ret;
156 }
157 
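/* Dirty logging is active for writable memslots that have a dirty bitmap. */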
158 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
159 {
160  return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
161 }
162 
163 /**
164  * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
165  * @kvm: pointer to kvm structure.
166  *
167  * Interface to HYP function to flush all VM TLB entries
168  */
169 int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
170 {
171  kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
172  return 0;
173 }
174 
175 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
176  gfn_t gfn, u64 nr_pages)
177 {
178  kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
179  gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
180  return 0;
181 }
182 
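/* A pfn that is not plain, mappable RAM (e.g. MMIO) is treated as device memory. */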
183 static bool kvm_is_device_pfn(unsigned long pfn)
184 {
185  return !pfn_is_map_memory(pfn);
186 }
187 
188 static void *stage2_memcache_zalloc_page(void *arg)
189 {
190  struct kvm_mmu_memory_cache *mc = arg;
191  void *virt;
192 
193  /* Allocated with __GFP_ZERO, so no need to zero */
194  virt = kvm_mmu_memory_cache_alloc(mc);
195  if (virt)
196  kvm_account_pgtable_pages(virt, 1);
197  return virt;
198 }
199 
200 static void *kvm_host_zalloc_pages_exact(size_t size)
201 {
202  return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
203 }
204 
205 static void *kvm_s2_zalloc_pages_exact(size_t size)
206 {
207  void *virt = kvm_host_zalloc_pages_exact(size);
208 
209  if (virt)
210  kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
211  return virt;
212 }
213 
214 static void kvm_s2_free_pages_exact(void *virt, size_t size)
215 {
216  kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
217  free_pages_exact(virt, size);
218 }
219 
220 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
221 
222 static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
223 {
224  struct page *page = container_of(head, struct page, rcu_head);
225  void *pgtable = page_to_virt(page);
226  s8 level = page_private(page);
227 
228  kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
229 }
230 
231 static void stage2_free_unlinked_table(void *addr, s8 level)
232 {
233  struct page *page = virt_to_page(addr);
234 
235  set_page_private(page, (unsigned long)level);
236  call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
237 }
238 
239 static void kvm_host_get_page(void *addr)
240 {
241  get_page(virt_to_page(addr));
242 }
243 
244 static void kvm_host_put_page(void *addr)
245 {
246  put_page(virt_to_page(addr));
247 }
248 
249 static void kvm_s2_put_page(void *addr)
250 {
251  struct page *p = virt_to_page(addr);
252  /* Dropping last refcount, the page will be freed */
253  if (page_count(p) == 1)
254  kvm_account_pgtable_pages(addr, -1);
255  put_page(p);
256 }
257 
258 static int kvm_host_page_count(void *addr)
259 {
260  return page_count(virt_to_page(addr));
261 }
262 
263 static phys_addr_t kvm_host_pa(void *addr)
264 {
265  return __pa(addr);
266 }
267 
268 static void *kvm_host_va(phys_addr_t phys)
269 {
270  return __va(phys);
271 }
272 
273 static void clean_dcache_guest_page(void *va, size_t size)
274 {
275  __clean_dcache_guest_page(va, size);
276 }
277 
278 static void invalidate_icache_guest_page(void *va, size_t size)
279 {
280  __invalidate_icache_guest_page(va, size);
281 }
282 
283 /*
284  * Unmapping vs dcache management:
285  *
286  * If a guest maps certain memory pages as uncached, all writes will
287  * bypass the data cache and go directly to RAM. However, the CPUs
288  * can still speculate reads (not writes) and fill cache lines with
289  * data.
290  *
291  * Those cache lines will be *clean* cache lines though, so a
292  * clean+invalidate operation is equivalent to an invalidate
293  * operation, because no cache lines are marked dirty.
294  *
295  * Those clean cache lines could be filled prior to an uncached write
296  * by the guest, and the cache coherent IO subsystem would therefore
297  * end up writing old data to disk.
298  *
299  * This is why right after unmapping a page/section and invalidating
300  * the corresponding TLBs, we flush to make sure the IO subsystem will
301  * never hit in the cache.
302  *
303  * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
304  * we then fully enforce cacheability of RAM, no matter what the guest
305  * does.
306  */
307 /**
308  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
309  * @mmu: The KVM stage-2 MMU pointer
310  * @start: The intermediate physical base address of the range to unmap
311  * @size: The size of the area to unmap
312  * @may_block: Whether or not we are permitted to block
313  *
314  * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
315  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
316  * destroying the VM), otherwise another faulting VCPU may come in and mess
317  * with things behind our backs.
318  */
319 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
320  bool may_block)
321 {
322  struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
323  phys_addr_t end = start + size;
324 
325  lockdep_assert_held_write(&kvm->mmu_lock);
326  WARN_ON(size & ~PAGE_MASK);
327  WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
328  may_block));
329 }
330 
331 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
332 {
333  __unmap_stage2_range(mmu, start, size, true);
334 }
335 
336 static void stage2_flush_memslot(struct kvm *kvm,
337  struct kvm_memory_slot *memslot)
338 {
339  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340  phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
341 
342  stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
343 }
344 
345 /**
346  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
347  * @kvm: The struct kvm pointer
348  *
349  * Go through the stage 2 page tables and invalidate any cache lines
350  * backing memory already mapped to the VM.
351  */
352 static void stage2_flush_vm(struct kvm *kvm)
353 {
354  struct kvm_memslots *slots;
355  struct kvm_memory_slot *memslot;
356  int idx, bkt;
357 
358  idx = srcu_read_lock(&kvm->srcu);
359  write_lock(&kvm->mmu_lock);
360 
361  slots = kvm_memslots(kvm);
362  kvm_for_each_memslot(memslot, bkt, slots)
363  stage2_flush_memslot(kvm, memslot);
364 
365  write_unlock(&kvm->mmu_lock);
366  srcu_read_unlock(&kvm->srcu, idx);
367 }
368 
369 /**
370  * free_hyp_pgds - free Hyp-mode page tables
371  */
372 void __init free_hyp_pgds(void)
373 {
374  mutex_lock(&kvm_hyp_pgd_mutex);
375  if (hyp_pgtable) {
376  kvm_pgtable_hyp_destroy(hyp_pgtable);
377  kfree(hyp_pgtable);
378  hyp_pgtable = NULL;
379  }
380  mutex_unlock(&kvm_hyp_pgd_mutex);
381 }
382 
383 static bool kvm_host_owns_hyp_mappings(void)
384 {
385  if (is_kernel_in_hyp_mode())
386  return false;
387 
388  if (static_branch_likely(&kvm_protected_mode_initialized))
389  return false;
390 
391  /*
392  * This can happen at boot time when __create_hyp_mappings() is called
393  * after the hyp protection has been enabled, but the static key has
394  * not been flipped yet.
395  */
396  if (!hyp_pgtable && is_protected_kvm_enabled())
397  return false;
398 
399  WARN_ON(!hyp_pgtable);
400 
401  return true;
402 }
403 
404 int __create_hyp_mappings(unsigned long start, unsigned long size,
405  unsigned long phys, enum kvm_pgtable_prot prot)
406 {
407  int err;
408 
409  if (WARN_ON(!kvm_host_owns_hyp_mappings()))
410  return -EINVAL;
411 
412  mutex_lock(&kvm_hyp_pgd_mutex);
413  err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
414  mutex_unlock(&kvm_hyp_pgd_mutex);
415 
416  return err;
417 }
418 
419 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
420 {
421  if (!is_vmalloc_addr(kaddr)) {
422  BUG_ON(!virt_addr_valid(kaddr));
423  return __pa(kaddr);
424  } else {
425  return page_to_phys(vmalloc_to_page(kaddr)) +
426  offset_in_page(kaddr);
427  }
428 }
429 
430 struct hyp_shared_pfn {
431  u64 pfn;
432  int count;
433  struct rb_node node;
434 };
435 
436 static DEFINE_MUTEX(hyp_shared_pfns_lock);
437 static struct rb_root hyp_shared_pfns = RB_ROOT;
438 
439 static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
440  struct rb_node **parent)
441 {
442  struct hyp_shared_pfn *this;
443 
444  *node = &hyp_shared_pfns.rb_node;
445  *parent = NULL;
446  while (**node) {
447  this = container_of(**node, struct hyp_shared_pfn, node);
448  *parent = **node;
449  if (this->pfn < pfn)
450  *node = &((**node)->rb_left);
451  else if (this->pfn > pfn)
452  *node = &((**node)->rb_right);
453  else
454  return this;
455  }
456 
457  return NULL;
458 }
459 
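/*
 * Refcounted sharing of host pages with the hypervisor: pfns handed to EL2
 * are tracked in the hyp_shared_pfns rb-tree so the share/unshare
 * hypercalls are only issued for the first and last user of a page.
 */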
460 static int share_pfn_hyp(u64 pfn)
461 {
462  struct rb_node **node, *parent;
463  struct hyp_shared_pfn *this;
464  int ret = 0;
465 
466  mutex_lock(&hyp_shared_pfns_lock);
467  this = find_shared_pfn(pfn, &node, &parent);
468  if (this) {
469  this->count++;
470  goto unlock;
471  }
472 
473  this = kzalloc(sizeof(*this), GFP_KERNEL);
474  if (!this) {
475  ret = -ENOMEM;
476  goto unlock;
477  }
478 
479  this->pfn = pfn;
480  this->count = 1;
481  rb_link_node(&this->node, parent, node);
482  rb_insert_color(&this->node, &hyp_shared_pfns);
483  ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
484 unlock:
485  mutex_unlock(&hyp_shared_pfns_lock);
486 
487  return ret;
488 }
489 
490 static int unshare_pfn_hyp(u64 pfn)
491 {
492  struct rb_node **node, *parent;
493  struct hyp_shared_pfn *this;
494  int ret = 0;
495 
496  mutex_lock(&hyp_shared_pfns_lock);
497  this = find_shared_pfn(pfn, &node, &parent);
498  if (WARN_ON(!this)) {
499  ret = -ENOENT;
500  goto unlock;
501  }
502 
503  this->count--;
504  if (this->count)
505  goto unlock;
506 
507  rb_erase(&this->node, &hyp_shared_pfns);
508  kfree(this);
509  ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
510 unlock:
511  mutex_unlock(&hyp_shared_pfns_lock);
512 
513  return ret;
514 }
515 
516 int kvm_share_hyp(void *from, void *to)
517 {
518  phys_addr_t start, end, cur;
519  u64 pfn;
520  int ret;
521 
522  if (is_kernel_in_hyp_mode())
523  return 0;
524 
525  /*
526  * The share hcall maps things in the 'fixed-offset' region of the hyp
527  * VA space, so we can only share physically contiguous data-structures
528  * for now.
529  */
530  if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
531  return -EINVAL;
532 
533  if (kvm_host_owns_hyp_mappings())
534  return create_hyp_mappings(from, to, PAGE_HYP);
535 
536  start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
537  end = PAGE_ALIGN(__pa(to));
538  for (cur = start; cur < end; cur += PAGE_SIZE) {
539  pfn = __phys_to_pfn(cur);
540  ret = share_pfn_hyp(pfn);
541  if (ret)
542  return ret;
543  }
544 
545  return 0;
546 }
547 
548 void kvm_unshare_hyp(void *from, void *to)
549 {
550  phys_addr_t start, end, cur;
551  u64 pfn;
552 
553  if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
554  return;
555 
556  start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
557  end = PAGE_ALIGN(__pa(to));
558  for (cur = start; cur < end; cur += PAGE_SIZE) {
559  pfn = __phys_to_pfn(cur);
560  WARN_ON(unshare_pfn_hyp(pfn));
561  }
562 }
563 
564 /**
565  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
566  * @from: The virtual kernel start address of the range
567  * @to: The virtual kernel end address of the range (exclusive)
568  * @prot: The protection to be applied to this range
569  *
570  * The same virtual address as the kernel virtual address is also used
571  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
572  * physical pages.
573  */
574 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
575 {
576  phys_addr_t phys_addr;
577  unsigned long virt_addr;
578  unsigned long start = kern_hyp_va((unsigned long)from);
579  unsigned long end = kern_hyp_va((unsigned long)to);
580 
581  if (is_kernel_in_hyp_mode())
582  return 0;
583 
584  if (!kvm_host_owns_hyp_mappings())
585  return -EPERM;
586 
587  start = start & PAGE_MASK;
588  end = PAGE_ALIGN(end);
589 
590  for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
591  int err;
592 
593  phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
594  err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
595  prot);
596  if (err)
597  return err;
598  }
599 
600  return 0;
601 }
602 
603 static int __hyp_alloc_private_va_range(unsigned long base)
604 {
605  lockdep_assert_held(&kvm_hyp_pgd_mutex);
606 
607  if (!PAGE_ALIGNED(base))
608  return -EINVAL;
609 
610  /*
611  * Verify that BIT(VA_BITS - 1) hasn't been flipped by
612  * allocating the new area, as it would indicate we've
613  * overflowed the idmap/IO address range.
614  */
615  if ((base ^ io_map_base) & BIT(VA_BITS - 1))
616  return -ENOMEM;
617 
618  io_map_base = base;
619 
620  return 0;
621 }
622 
623 /**
624  * hyp_alloc_private_va_range - Allocates a private VA range.
625  * @size: The size of the VA range to reserve.
626  * @haddr: The hypervisor virtual start address of the allocation.
627  *
628  * The private virtual address (VA) range is allocated below io_map_base
629  * and aligned based on the order of @size.
630  *
631  * Return: 0 on success or negative error code on failure.
632  */
633 int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
634 {
635  unsigned long base;
636  int ret = 0;
637 
638  mutex_lock(&kvm_hyp_pgd_mutex);
639 
640  /*
641  * This assumes that we have enough space below the idmap
642  * page to allocate our VAs. If not, the check in
643  * __hyp_alloc_private_va_range() will kick. A potential
644  * alternative would be to detect that overflow and switch
645  * to an allocation above the idmap.
646  *
647  * The allocated size is always a multiple of PAGE_SIZE.
648  */
649  size = PAGE_ALIGN(size);
650  base = io_map_base - size;
651  ret = __hyp_alloc_private_va_range(base);
652 
653  mutex_unlock(&kvm_hyp_pgd_mutex);
654 
655  if (!ret)
656  *haddr = base;
657 
658  return ret;
659 }
660 
661 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
662  unsigned long *haddr,
663  enum kvm_pgtable_prot prot)
664 {
665  unsigned long addr;
666  int ret = 0;
667 
668  if (!kvm_host_owns_hyp_mappings()) {
669  addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
670  phys_addr, size, prot);
671  if (IS_ERR_VALUE(addr))
672  return addr;
673  *haddr = addr;
674 
675  return 0;
676  }
677 
678  size = PAGE_ALIGN(size + offset_in_page(phys_addr));
679  ret = hyp_alloc_private_va_range(size, &addr);
680  if (ret)
681  return ret;
682 
683  ret = __create_hyp_mappings(addr, size, phys_addr, prot);
684  if (ret)
685  return ret;
686 
687  *haddr = addr + offset_in_page(phys_addr);
688  return ret;
689 }
690 
691 int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
692 {
693  unsigned long base;
694  size_t size;
695  int ret;
696 
697  mutex_lock(&kvm_hyp_pgd_mutex);
698  /*
699  * Efficient stack verification using the PAGE_SHIFT bit implies
700  * an alignment of our allocation on the order of the size.
701  */
702  size = PAGE_SIZE * 2;
703  base = ALIGN_DOWN(io_map_base - size, size);
704 
705  ret = __hyp_alloc_private_va_range(base);
706 
707  mutex_unlock(&kvm_hyp_pgd_mutex);
708 
709  if (ret) {
710  kvm_err("Cannot allocate hyp stack guard page\n");
711  return ret;
712  }
713 
714  /*
715  * Since the stack grows downwards, map the stack to the page
716  * at the higher address and leave the lower guard page
717  * unbacked.
718  *
719  * Any valid stack address now has the PAGE_SHIFT bit as 1
720  * and addresses corresponding to the guard page have the
721  * PAGE_SHIFT bit as 0 - this is used for overflow detection.
722  */
723  ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
724  PAGE_HYP);
725  if (ret)
726  kvm_err("Cannot map hyp stack\n");
727 
728  *haddr = base + size;
729 
730  return ret;
731 }
732 
733 /**
734  * create_hyp_io_mappings - Map IO into both kernel and HYP
735  * @phys_addr: The physical start address which gets mapped
736  * @size: Size of the region being mapped
737  * @kaddr: Kernel VA for this mapping
738  * @haddr: HYP VA for this mapping
739  */
740 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
741  void __iomem **kaddr,
742  void __iomem **haddr)
743 {
744  unsigned long addr;
745  int ret;
746 
747  if (is_protected_kvm_enabled())
748  return -EPERM;
749 
750  *kaddr = ioremap(phys_addr, size);
751  if (!*kaddr)
752  return -ENOMEM;
753 
754  if (is_kernel_in_hyp_mode()) {
755  *haddr = *kaddr;
756  return 0;
757  }
758 
759  ret = __create_hyp_private_mapping(phys_addr, size,
760  &addr, PAGE_HYP_DEVICE);
761  if (ret) {
762  iounmap(*kaddr);
763  *kaddr = NULL;
764  *haddr = NULL;
765  return ret;
766  }
767 
768  *haddr = (void __iomem *)addr;
769  return 0;
770 }
771 
772 /**
773  * create_hyp_exec_mappings - Map an executable range into HYP
774  * @phys_addr: The physical start address which gets mapped
775  * @size: Size of the region being mapped
776  * @haddr: HYP VA for this mapping
777  */
778 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
779  void **haddr)
780 {
781  unsigned long addr;
782  int ret;
783 
784  BUG_ON(is_kernel_in_hyp_mode());
785 
786  ret = __create_hyp_private_mapping(phys_addr, size,
787  &addr, PAGE_HYP_EXEC);
788  if (ret) {
789  *haddr = NULL;
790  return ret;
791  }
792 
793  *haddr = (void *)addr;
794  return 0;
795 }
796 
797 static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
798  /* We shouldn't need any other callback to walk the PT */
799  .phys_to_virt = kvm_host_va,
800 };
801 
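/*
 * Walk the userspace stage-1 page tables to find the size of the mapping
 * backing @addr. Returns -EAGAIN if the PTE has gone away in the meantime,
 * in which case the caller simply replays the fault.
 */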
802 static int get_user_mapping_size(struct kvm *kvm, u64 addr)
803 {
804  struct kvm_pgtable pgt = {
805  .pgd = (kvm_pteref_t)kvm->mm->pgd,
806  .ia_bits = vabits_actual,
807  .start_level = (KVM_PGTABLE_LAST_LEVEL -
808  CONFIG_PGTABLE_LEVELS + 1),
809  .mm_ops = &kvm_user_mm_ops,
810  };
811  unsigned long flags;
812  kvm_pte_t pte = 0; /* Keep GCC quiet... */
813  s8 level = S8_MAX;
814  int ret;
815 
816  /*
817  * Disable IRQs so that we hazard against a concurrent
818  * teardown of the userspace page tables (which relies on
819  * IPI-ing threads).
820  */
821  local_irq_save(flags);
822  ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
823  local_irq_restore(flags);
824 
825  if (ret)
826  return ret;
827 
828  /*
829  * Not seeing an error, but not updating level? Something went
830  * deeply wrong...
831  */
832  if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL))
833  return -EFAULT;
834  if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL))
835  return -EFAULT;
836 
837  /* Oops, the userspace PTs are gone... Replay the fault */
838  if (!kvm_pte_valid(pte))
839  return -EAGAIN;
840 
841  return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
842 }
843 
844 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
845  .zalloc_page = stage2_memcache_zalloc_page,
846  .zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
847  .free_pages_exact = kvm_s2_free_pages_exact,
848  .free_unlinked_table = stage2_free_unlinked_table,
849  .get_page = kvm_host_get_page,
850  .put_page = kvm_s2_put_page,
851  .page_count = kvm_host_page_count,
852  .phys_to_virt = kvm_host_va,
853  .virt_to_phys = kvm_host_pa,
854  .dcache_clean_inval_poc = clean_dcache_guest_page,
855  .icache_inval_pou = invalidate_icache_guest_page,
856 };
857 
858 /**
859  * kvm_init_stage2_mmu - Initialise a S2 MMU structure
860  * @kvm: The pointer to the KVM structure
861  * @mmu: The pointer to the s2 MMU structure
862  * @type: The machine type of the virtual machine
863  *
864  * Allocates only the stage-2 HW PGD level table(s).
865  * Note we don't need locking here as this is only called when the VM is
866  * created, which can only be done once.
867  */
868 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
869 {
871  int cpu, err;
872  struct kvm_pgtable *pgt;
873  u64 mmfr0, mmfr1;
874  u32 phys_shift;
875 
876  if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
877  return -EINVAL;
878 
879  phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
880  if (is_protected_kvm_enabled()) {
881  phys_shift = kvm_ipa_limit;
882  } else if (phys_shift) {
883  if (phys_shift > kvm_ipa_limit ||
884  phys_shift < ARM64_MIN_PARANGE_BITS)
885  return -EINVAL;
886  } else {
887  phys_shift = KVM_PHYS_SHIFT;
888  if (phys_shift > kvm_ipa_limit) {
889  pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
890  current->comm);
891  return -EINVAL;
892  }
893  }
894 
895  mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
896  mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
897  mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
898 
899  if (mmu->pgt != NULL) {
900  kvm_err("kvm_arch already initialized?\n");
901  return -EINVAL;
902  }
903 
904  pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
905  if (!pgt)
906  return -ENOMEM;
907 
908  mmu->arch = &kvm->arch;
909  err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
910  if (err)
911  goto out_free_pgtable;
912 
913  mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
914  if (!mmu->last_vcpu_ran) {
915  err = -ENOMEM;
916  goto out_destroy_pgtable;
917  }
918 
919  for_each_possible_cpu(cpu)
920  *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
921 
922  /* The eager page splitting is disabled by default */
923  mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
924  mmu->split_page_cache.gfp_zero = __GFP_ZERO;
925 
926  mmu->pgt = pgt;
927  mmu->pgd_phys = __pa(pgt->pgd);
928  return 0;
929 
930 out_destroy_pgtable:
931  kvm_pgtable_stage2_destroy(pgt);
932 out_free_pgtable:
933  kfree(pgt);
934  return err;
935 }
936 
937 void kvm_uninit_stage2_mmu(struct kvm *kvm)
938 {
939  kvm_free_stage2_pgd(&kvm->arch.mmu);
940  kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
941 }
942 
943 static void stage2_unmap_memslot(struct kvm *kvm,
944  struct kvm_memory_slot *memslot)
945 {
946  hva_t hva = memslot->userspace_addr;
947  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
948  phys_addr_t size = PAGE_SIZE * memslot->npages;
949  hva_t reg_end = hva + size;
950 
951  /*
952  * A memory region could potentially cover multiple VMAs, and any holes
953  * between them, so iterate over all of them to find out if we should
954  * unmap any of them.
955  *
956  * +--------------------------------------------+
957  * +---------------+----------------+ +----------------+
958  * | : VMA 1 | VMA 2 | | VMA 3 : |
959  * +---------------+----------------+ +----------------+
960  * | memory region |
961  * +--------------------------------------------+
962  */
963  do {
964  struct vm_area_struct *vma;
965  hva_t vm_start, vm_end;
966 
967  vma = find_vma_intersection(current->mm, hva, reg_end);
968  if (!vma)
969  break;
970 
971  /*
972  * Take the intersection of this VMA with the memory region
973  */
974  vm_start = max(hva, vma->vm_start);
975  vm_end = min(reg_end, vma->vm_end);
976 
977  if (!(vma->vm_flags & VM_PFNMAP)) {
978  gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
979  unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
980  }
981  hva = vm_end;
982  } while (hva < reg_end);
983 }
984 
985 /**
986  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
987  * @kvm: The struct kvm pointer
988  *
989  * Go through the memregions and unmap any regular RAM
990  * backing memory already mapped to the VM.
991  */
992 void stage2_unmap_vm(struct kvm *kvm)
993 {
994  struct kvm_memslots *slots;
995  struct kvm_memory_slot *memslot;
996  int idx, bkt;
997 
998  idx = srcu_read_lock(&kvm->srcu);
999  mmap_read_lock(current->mm);
1000  write_lock(&kvm->mmu_lock);
1001 
1002  slots = kvm_memslots(kvm);
1003  kvm_for_each_memslot(memslot, bkt, slots)
1004  stage2_unmap_memslot(kvm, memslot);
1005 
1006  write_unlock(&kvm->mmu_lock);
1007  mmap_read_unlock(current->mm);
1008  srcu_read_unlock(&kvm->srcu, idx);
1009 }
1010 
1011 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
1012 {
1013  struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
1014  struct kvm_pgtable *pgt = NULL;
1015 
1016  write_lock(&kvm->mmu_lock);
1017  pgt = mmu->pgt;
1018  if (pgt) {
1019  mmu->pgd_phys = 0;
1020  mmu->pgt = NULL;
1021  free_percpu(mmu->last_vcpu_ran);
1022  }
1023  write_unlock(&kvm->mmu_lock);
1024 
1025  if (pgt) {
1026  kvm_pgtable_stage2_destroy(pgt);
1027  kfree(pgt);
1028  }
1029 }
1030 
1031 static void hyp_mc_free_fn(void *addr, void *unused)
1032 {
1033  free_page((unsigned long)addr);
1034 }
1035 
1036 static void *hyp_mc_alloc_fn(void *unused)
1037 {
1038  return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
1039 }
1040 
1041 void free_hyp_memcache(struct kvm_hyp_memcache *mc)
1042 {
1043  if (is_protected_kvm_enabled())
1044  __free_hyp_memcache(mc, hyp_mc_free_fn,
1045  kvm_host_va, NULL);
1046 }
1047 
1048 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
1049 {
1050  if (!is_protected_kvm_enabled())
1051  return 0;
1052 
1053  return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
1054  kvm_host_pa, NULL);
1055 }
1056 
1057 /**
1058  * kvm_phys_addr_ioremap - map a device range to guest IPA
1059  *
1060  * @kvm: The KVM pointer
1061  * @guest_ipa: The IPA at which to insert the mapping
1062  * @pa: The physical address of the device
1063  * @size: The size of the mapping
1064  * @writable: Whether or not to create a writable mapping
1065  */
1066 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1067  phys_addr_t pa, unsigned long size, bool writable)
1068 {
1069  phys_addr_t addr;
1070  int ret = 0;
1071  struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
1072  struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
1073  struct kvm_pgtable *pgt = mmu->pgt;
1074  enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
1075  KVM_PGTABLE_PROT_R |
1076  (writable ? KVM_PGTABLE_PROT_W : 0);
1077 
1078  if (is_protected_kvm_enabled())
1079  return -EPERM;
1080 
1081  size += offset_in_page(guest_ipa);
1082  guest_ipa &= PAGE_MASK;
1083 
1084  for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
1085  ret = kvm_mmu_topup_memory_cache(&cache,
1086  kvm_mmu_cache_min_pages(mmu));
1087  if (ret)
1088  break;
1089 
1090  write_lock(&kvm->mmu_lock);
1091  ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
1092  &cache, 0);
1093  write_unlock(&kvm->mmu_lock);
1094  if (ret)
1095  break;
1096 
1097  pa += PAGE_SIZE;
1098  }
1099 
1100  kvm_mmu_free_memory_cache(&cache);
1101  return ret;
1102 }
1103 
1104 /**
1105  * stage2_wp_range() - write protect stage2 memory region range
1106  * @mmu: The KVM stage-2 MMU pointer
1107  * @addr: Start address of range
1108  * @end: End address of range
1109  */
1110 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
1111 {
1112  stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wp);
1113 }
1114 
1115 /**
1116  * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1117  * @kvm: The KVM pointer
1118  * @slot: The memory slot to write protect
1119  *
1120  * Called to start logging dirty pages after memory region
1121  * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
1122  * all present PUD, PMD and PTEs are write protected in the memory region.
1123  * Afterwards read of dirty page log can be called.
1124  *
1125  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1126  * serializing operations for VM memory regions.
1127  */
1128 static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1129 {
1130  struct kvm_memslots *slots = kvm_memslots(kvm);
1131  struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1132  phys_addr_t start, end;
1133 
1134  if (WARN_ON_ONCE(!memslot))
1135  return;
1136 
1137  start = memslot->base_gfn << PAGE_SHIFT;
1138  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1139 
1140  write_lock(&kvm->mmu_lock);
1141  stage2_wp_range(&kvm->arch.mmu, start, end);
1142  write_unlock(&kvm->mmu_lock);
1143  kvm_flush_remote_tlbs_memslot(kvm, memslot);
1144 }
1145 
1146 /**
1147  * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
1148  * pages for memory slot
1149  * @kvm: The KVM pointer
1150  * @slot: The memory slot to split
1151  *
1152  * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired,
1153  * serializing operations for VM memory regions.
1154  */
1155 static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
1156 {
1157  struct kvm_memslots *slots;
1158  struct kvm_memory_slot *memslot;
1159  phys_addr_t start, end;
1160 
1161  lockdep_assert_held(&kvm->slots_lock);
1162 
1163  slots = kvm_memslots(kvm);
1164  memslot = id_to_memslot(slots, slot);
1165 
1166  start = memslot->base_gfn << PAGE_SHIFT;
1167  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1168 
1169  write_lock(&kvm->mmu_lock);
1170  kvm_mmu_split_huge_pages(kvm, start, end);
1171  write_unlock(&kvm->mmu_lock);
1172 }
1173 
1174 /*
1175  * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
1176  * @kvm: The KVM pointer
1177  * @slot: The memory slot associated with mask
1178  * @gfn_offset: The gfn offset in memory slot
1179  * @mask: The mask of pages at offset 'gfn_offset' in this memory
1180  * slot to enable dirty logging on
1181  *
1182  * Writes protect selected pages to enable dirty logging, and then
1183  * splits them to PAGE_SIZE. Caller must acquire kvm->mmu_lock.
1184  */
1185 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1186  struct kvm_memory_slot *slot,
1187  gfn_t gfn_offset, unsigned long mask)
1188 {
1189  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1190  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1191  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1192 
1193  lockdep_assert_held_write(&kvm->mmu_lock);
1194 
1195  stage2_wp_range(&kvm->arch.mmu, start, end);
1196 
1197  /*
1198  * Eager-splitting is done when manual-protect is set. We
1199  * also check for initially-all-set because we can avoid
1200  * eager-splitting if initially-all-set is false.
1201  * Initially-all-set equal false implies that huge-pages were
1202  * already split when enabling dirty logging: no need to do it
1203  * again.
1204  */
1205  if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1206  kvm_mmu_split_huge_pages(kvm, start, end);
1207 }
1208 
1209 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1210 {
1211  send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1212 }
1213 
1214 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1215  unsigned long hva,
1216  unsigned long map_size)
1217 {
1218  gpa_t gpa_start;
1219  hva_t uaddr_start, uaddr_end;
1220  size_t size;
1221 
1222  /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1223  if (map_size == PAGE_SIZE)
1224  return true;
1225 
1226  size = memslot->npages * PAGE_SIZE;
1227 
1228  gpa_start = memslot->base_gfn << PAGE_SHIFT;
1229 
1230  uaddr_start = memslot->userspace_addr;
1231  uaddr_end = uaddr_start + size;
1232 
1233  /*
1234  * Pages belonging to memslots that don't have the same alignment
1235  * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1236  * PMD/PUD entries, because we'll end up mapping the wrong pages.
1237  *
1238  * Consider a layout like the following:
1239  *
1240  * memslot->userspace_addr:
1241  * +-----+--------------------+--------------------+---+
1242  * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1243  * +-----+--------------------+--------------------+---+
1244  *
1245  * memslot->base_gfn << PAGE_SHIFT:
1246  * +---+--------------------+--------------------+-----+
1247  * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1248  * +---+--------------------+--------------------+-----+
1249  *
1250  * If we create those stage-2 blocks, we'll end up with this incorrect
1251  * mapping:
1252  * d -> f
1253  * e -> g
1254  * f -> h
1255  */
1256  if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1257  return false;
1258 
1259  /*
1260  * Next, let's make sure we're not trying to map anything not covered
1261  * by the memslot. This means we have to prohibit block size mappings
1262  * for the beginning and end of a non-block aligned and non-block sized
1263  * memory slot (illustrated by the head and tail parts of the
1264  * userspace view above containing pages 'abcde' and 'xyz',
1265  * respectively).
1266  *
1267  * Note that it doesn't matter if we do the check using the
1268  * userspace_addr or the base_gfn, as both are equally aligned (per
1269  * the check above) and equally sized.
1270  */
1271  return (hva & ~(map_size - 1)) >= uaddr_start &&
1272  (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1273 }
1274 
1275 /*
1276  * Check if the given hva is backed by a transparent huge page (THP) and
1277  * whether it can be mapped using block mapping in stage2. If so, adjust
1278  * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
1279  * supported. This will need to be updated to support other THP sizes.
1280  *
1281  * Returns the size of the mapping.
1282  */
1283 static long
1284 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
1285  unsigned long hva, kvm_pfn_t *pfnp,
1286  phys_addr_t *ipap)
1287 {
1288  kvm_pfn_t pfn = *pfnp;
1289 
1290  /*
1291  * Make sure the adjustment is done only for THP pages. Also make
1292  * sure that the HVA and IPA are sufficiently aligned and that the
1293  * block map is contained within the memslot.
1294  */
1295  if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1296  int sz = get_user_mapping_size(kvm, hva);
1297 
1298  if (sz < 0)
1299  return sz;
1300 
1301  if (sz < PMD_SIZE)
1302  return PAGE_SIZE;
1303 
1304  *ipap &= PMD_MASK;
1305  pfn &= ~(PTRS_PER_PMD - 1);
1306  *pfnp = pfn;
1307 
1308  return PMD_SIZE;
1309  }
1310 
1311  /* Use page mapping if we cannot use block mapping. */
1312  return PAGE_SIZE;
1313 }
1314 
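/*
 * Work out the largest page shift that can be used to map @hva: the
 * hugetlb page size for hugetlbfs VMAs, PUD/PMD shifts for suitably
 * aligned VM_PFNMAP VMAs, and PAGE_SHIFT otherwise.
 */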
1315 static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
1316 {
1317  unsigned long pa;
1318 
1319  if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
1320  return huge_page_shift(hstate_vma(vma));
1321 
1322  if (!(vma->vm_flags & VM_PFNMAP))
1323  return PAGE_SHIFT;
1324 
1325  VM_BUG_ON(is_vm_hugetlb_page(vma));
1326 
1327  pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
1328 
1329 #ifndef __PAGETABLE_PMD_FOLDED
1330  if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
1331  ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
1332  ALIGN(hva, PUD_SIZE) <= vma->vm_end)
1333  return PUD_SHIFT;
1334 #endif
1335 
1336  if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1337  ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1338  ALIGN(hva, PMD_SIZE) <= vma->vm_end)
1339  return PMD_SHIFT;
1340 
1341  return PAGE_SHIFT;
1342 }
1343 
1344 /*
1345  * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
1346  * able to see the page's tags and therefore they must be initialised first. If
1347  * PG_mte_tagged is set, tags have already been initialised.
1348  *
1349  * The race in the test/set of the PG_mte_tagged flag is handled by:
1350  * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
1351  * racing to sanitise the same page
1352  * - mmap_lock protects between a VM faulting a page in and the VMM performing
1353  * an mprotect() to add VM_MTE
1354  */
1355 static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
1356  unsigned long size)
1357 {
1358  unsigned long i, nr_pages = size >> PAGE_SHIFT;
1359  struct page *page = pfn_to_page(pfn);
1360 
1361  if (!kvm_has_mte(kvm))
1362  return;
1363 
1364  for (i = 0; i < nr_pages; i++, page++) {
1365  if (try_page_mte_tagging(page)) {
1366  mte_clear_page_tags(page_address(page));
1367  set_page_mte_tagged(page);
1368  }
1369  }
1370 }
1371 
1372 static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
1373 {
1374  return vma->vm_flags & VM_MTE_ALLOWED;
1375 }
1376 
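/*
 * Handle a stage-2 translation or permission fault on memslot-backed
 * memory: resolve the pfn, pick the largest mapping granule that the
 * memslot, the VMA and dirty logging allow, then install or relax the
 * stage-2 mapping.
 */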
1377 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1378  struct kvm_memory_slot *memslot, unsigned long hva,
1379  bool fault_is_perm)
1380 {
1381  int ret = 0;
1382  bool write_fault, writable, force_pte = false;
1383  bool exec_fault, mte_allowed;
1384  bool device = false;
1385  unsigned long mmu_seq;
1386  struct kvm *kvm = vcpu->kvm;
1387  struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1388  struct vm_area_struct *vma;
1389  short vma_shift;
1390  gfn_t gfn;
1391  kvm_pfn_t pfn;
1392  bool logging_active = memslot_is_logging(memslot);
1393  long vma_pagesize, fault_granule;
1394  enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
1395  struct kvm_pgtable *pgt;
1396 
1397  if (fault_is_perm)
1398  fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
1399  write_fault = kvm_is_write_fault(vcpu);
1400  exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
1401  VM_BUG_ON(write_fault && exec_fault);
1402 
1403  if (fault_is_perm && !write_fault && !exec_fault) {
1404  kvm_err("Unexpected L2 read permission error\n");
1405  return -EFAULT;
1406  }
1407 
1408  /*
1409  * Permission faults just need to update the existing leaf entry,
1410  * and so normally don't require allocations from the memcache. The
1411  * only exception to this is when dirty logging is enabled at runtime
1412  * and a write fault needs to collapse a block entry into a table.
1413  */
1414  if (!fault_is_perm || (logging_active && write_fault)) {
1415  ret = kvm_mmu_topup_memory_cache(memcache,
1416  kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
1417  if (ret)
1418  return ret;
1419  }
1420 
1421  /*
1422  * Let's check if we will get back a huge page backed by hugetlbfs, or
1423  * get block mapping for device MMIO region.
1424  */
1425  mmap_read_lock(current->mm);
1426  vma = vma_lookup(current->mm, hva);
1427  if (unlikely(!vma)) {
1428  kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1429  mmap_read_unlock(current->mm);
1430  return -EFAULT;
1431  }
1432 
1433  /*
1434  * logging_active is guaranteed to never be true for VM_PFNMAP
1435  * memslots.
1436  */
1437  if (logging_active) {
1438  force_pte = true;
1439  vma_shift = PAGE_SHIFT;
1440  } else {
1441  vma_shift = get_vma_page_shift(vma, hva);
1442  }
1443 
1444  switch (vma_shift) {
1445 #ifndef __PAGETABLE_PMD_FOLDED
1446  case PUD_SHIFT:
1447  if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1448  break;
1449  fallthrough;
1450 #endif
1451  case CONT_PMD_SHIFT:
1452  vma_shift = PMD_SHIFT;
1453  fallthrough;
1454  case PMD_SHIFT:
1455  if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1456  break;
1457  fallthrough;
1458  case CONT_PTE_SHIFT:
1459  vma_shift = PAGE_SHIFT;
1460  force_pte = true;
1461  fallthrough;
1462  case PAGE_SHIFT:
1463  break;
1464  default:
1465  WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
1466  }
1467 
1468  vma_pagesize = 1UL << vma_shift;
1469  if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
1470  fault_ipa &= ~(vma_pagesize - 1);
1471 
1472  gfn = fault_ipa >> PAGE_SHIFT;
1473  mte_allowed = kvm_vma_mte_allowed(vma);
1474 
1475  /* Don't use the VMA after the unlock -- it may have vanished */
1476  vma = NULL;
1477 
1478  /*
1479  * Read mmu_invalidate_seq so that KVM can detect if the results of
1480  * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
1481  * acquiring kvm->mmu_lock.
1482  *
1483  * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
1484  * with the smp_wmb() in kvm_mmu_invalidate_end().
1485  */
1486  mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1487  mmap_read_unlock(current->mm);
1488 
1489  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
1490  write_fault, &writable, NULL);
1491  if (pfn == KVM_PFN_ERR_HWPOISON) {
1492  kvm_send_hwpoison_signal(hva, vma_shift);
1493  return 0;
1494  }
1495  if (is_error_noslot_pfn(pfn))
1496  return -EFAULT;
1497 
1498  if (kvm_is_device_pfn(pfn)) {
1499  /*
1500  * If the page was identified as device early by looking at
1501  * the VMA flags, vma_pagesize is already representing the
1502  * largest quantity we can map. If instead it was mapped
1503  * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
1504  * and must not be upgraded.
1505  *
1506  * In both cases, we don't let transparent_hugepage_adjust()
1507  * change things at the last minute.
1508  */
1509  device = true;
1510  } else if (logging_active && !write_fault) {
1511  /*
1512  * Only actually map the page as writable if this was a write
1513  * fault.
1514  */
1515  writable = false;
1516  }
1517 
1518  if (exec_fault && device)
1519  return -ENOEXEC;
1520 
1521  read_lock(&kvm->mmu_lock);
1522  pgt = vcpu->arch.hw_mmu->pgt;
1523  if (mmu_invalidate_retry(kvm, mmu_seq))
1524  goto out_unlock;
1525 
1526  /*
1527  * If we are not forced to use page mapping, check if we are
1528  * backed by a THP and thus use block mapping if possible.
1529  */
1530  if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
1531  if (fault_is_perm && fault_granule > PAGE_SIZE)
1532  vma_pagesize = fault_granule;
1533  else
1534  vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
1535  hva, &pfn,
1536  &fault_ipa);
1537 
1538  if (vma_pagesize < 0) {
1539  ret = vma_pagesize;
1540  goto out_unlock;
1541  }
1542  }
1543 
1544  if (!fault_is_perm && !device && kvm_has_mte(kvm)) {
1545  /* Check the VMM hasn't introduced a new disallowed VMA */
1546  if (mte_allowed) {
1547  sanitise_mte_tags(kvm, pfn, vma_pagesize);
1548  } else {
1549  ret = -EFAULT;
1550  goto out_unlock;
1551  }
1552  }
1553 
1554  if (writable)
1555  prot |= KVM_PGTABLE_PROT_W;
1556 
1557  if (exec_fault)
1558  prot |= KVM_PGTABLE_PROT_X;
1559 
1560  if (device)
1561  prot |= KVM_PGTABLE_PROT_DEVICE;
1562  else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
1563  prot |= KVM_PGTABLE_PROT_X;
1564 
1565  /*
1566  * Under the premise of getting a FSC_PERM fault, we just need to relax
1567  * permissions only if vma_pagesize equals fault_granule. Otherwise,
1568  * kvm_pgtable_stage2_map() should be called to change block size.
1569  */
1570  if (fault_is_perm && vma_pagesize == fault_granule)
1571  ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1572  else
1573  ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
1574  __pfn_to_phys(pfn), prot,
1575  memcache,
1576  KVM_PGTABLE_WALK_HANDLE_FAULT |
1577  KVM_PGTABLE_WALK_SHARED);
1578 
1579  /* Mark the page dirty only if the fault is handled successfully */
1580  if (writable && !ret) {
1581  kvm_set_pfn_dirty(pfn);
1582  mark_page_dirty_in_slot(kvm, memslot, gfn);
1583  }
1584 
1585 out_unlock:
1586  read_unlock(&kvm->mmu_lock);
1587  kvm_release_pfn_clean(pfn);
1588  return ret != -EAGAIN ? ret : 0;
1589 }
1590 
1591 /* Resolve the access fault by making the page young again. */
1592 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1593 {
1594  kvm_pte_t pte;
1595  struct kvm_s2_mmu *mmu;
1596 
1597  trace_kvm_access_fault(fault_ipa);
1598 
1599  read_lock(&vcpu->kvm->mmu_lock);
1600  mmu = vcpu->arch.hw_mmu;
1601  pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
1602  read_unlock(&vcpu->kvm->mmu_lock);
1603 
1604  if (kvm_pte_valid(pte))
1605  kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
1606 }
1607 
1608 /**
1609  * kvm_handle_guest_abort - handles all 2nd stage aborts
1610  * @vcpu: the VCPU pointer
1611  *
1612  * Any abort that gets to the host is almost guaranteed to be caused by a
1613  * missing second stage translation table entry, which can mean that either the
1614  * guest simply needs more memory and we must allocate an appropriate page or it
1615  * can mean that the guest tried to access I/O memory, which is emulated by user
1616  * space. The distinction is based on the IPA causing the fault and whether this
1617  * memory region has been registered as standard RAM by user space.
1618  */
1619 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
1620 {
1621  unsigned long esr;
1622  phys_addr_t fault_ipa;
1623  struct kvm_memory_slot *memslot;
1624  unsigned long hva;
1625  bool is_iabt, write_fault, writable;
1626  gfn_t gfn;
1627  int ret, idx;
1628 
1629  esr = kvm_vcpu_get_esr(vcpu);
1630 
1631  fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1632  is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1633 
1634  if (esr_fsc_is_translation_fault(esr)) {
1635  /* Beyond sanitised PARange (which is the IPA limit) */
1636  if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
1637  kvm_inject_size_fault(vcpu);
1638  return 1;
1639  }
1640 
1641  /* Falls between the IPA range and the PARange? */
1642  if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
1643  fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
1644 
1645  if (is_iabt)
1646  kvm_inject_pabt(vcpu, fault_ipa);
1647  else
1648  kvm_inject_dabt(vcpu, fault_ipa);
1649  return 1;
1650  }
1651  }
1652 
1653  /* Synchronous External Abort? */
1654  if (kvm_vcpu_abt_issea(vcpu)) {
1655  /*
1656  * For RAS the host kernel may handle this abort.
1657  * There is no need to pass the error into the guest.
1658  */
1659  if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
1660  kvm_inject_vabt(vcpu);
1661 
1662  return 1;
1663  }
1664 
1665  trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
1666  kvm_vcpu_get_hfar(vcpu), fault_ipa);
1667 
1668  /* Check the stage-2 fault is trans. fault or write fault */
1669  if (!esr_fsc_is_translation_fault(esr) &&
1670  !esr_fsc_is_permission_fault(esr) &&
1671  !esr_fsc_is_access_flag_fault(esr)) {
1672  kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1673  kvm_vcpu_trap_get_class(vcpu),
1674  (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1675  (unsigned long)kvm_vcpu_get_esr(vcpu));
1676  return -EFAULT;
1677  }
1678 
1679  idx = srcu_read_lock(&vcpu->kvm->srcu);
1680 
1681  gfn = fault_ipa >> PAGE_SHIFT;
1682  memslot = gfn_to_memslot(vcpu->kvm, gfn);
1683  hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1684  write_fault = kvm_is_write_fault(vcpu);
1685  if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1686  /*
1687  * The guest has put either its instructions or its page-tables
1688  * somewhere it shouldn't have. Userspace won't be able to do
1689  * anything about this (there's no syndrome for a start), so
1690  * re-inject the abort back into the guest.
1691  */
1692  if (is_iabt) {
1693  ret = -ENOEXEC;
1694  goto out;
1695  }
1696 
1697  if (kvm_vcpu_abt_iss1tw(vcpu)) {
1698  kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1699  ret = 1;
1700  goto out_unlock;
1701  }
1702 
1703  /*
1704  * Check for a cache maintenance operation. Since we
1705  * ended-up here, we know it is outside of any memory
1706  * slot. But we can't find out if that is for a device,
1707  * or if the guest is just being stupid. The only thing
1708  * we know for sure is that this range cannot be cached.
1709  *
1710  * So let's assume that the guest is just being
1711  * cautious, and skip the instruction.
1712  */
1713  if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1714  kvm_incr_pc(vcpu);
1715  ret = 1;
1716  goto out_unlock;
1717  }
1718 
1719  /*
1720  * The IPA is reported as [MAX:12], so we need to
1721  * complement it with the bottom 12 bits from the
1722  * faulting VA. This is always 12 bits, irrespective
1723  * of the page size.
1724  */
1725  fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1726  ret = io_mem_abort(vcpu, fault_ipa);
1727  goto out_unlock;
1728  }
1729 
1730  /* Userspace should not be able to register out-of-bounds IPAs */
1731  VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
1732 
1733  if (esr_fsc_is_access_flag_fault(esr)) {
1734  handle_access_fault(vcpu, fault_ipa);
1735  ret = 1;
1736  goto out_unlock;
1737  }
1738 
1739  ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
1740  esr_fsc_is_permission_fault(esr));
1741  if (ret == 0)
1742  ret = 1;
1743 out:
1744  if (ret == -ENOEXEC) {
1745  kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1746  ret = 1;
1747  }
1748 out_unlock:
1749  srcu_read_unlock(&vcpu->kvm->srcu, idx);
1750  return ret;
1751 }
1752 
1753 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1754 {
1755  if (!kvm->arch.mmu.pgt)
1756  return false;
1757 
1758  __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
1759  (range->end - range->start) << PAGE_SHIFT,
1760  range->may_block);
1761 
1762  return false;
1763 }
1764 
1765 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1766 {
1767  kvm_pfn_t pfn = pte_pfn(range->arg.pte);
1768 
1769  if (!kvm->arch.mmu.pgt)
1770  return false;
1771 
1772  WARN_ON(range->end - range->start != 1);
1773 
1774  /*
1775  * If the page isn't tagged, defer to user_mem_abort() for sanitising
1776  * the MTE tags. The S2 pte should have been unmapped by
1777  * mmu_notifier_invalidate_range_end().
1778  */
1779  if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
1780  return false;
1781 
1782  /*
1783  * We've moved a page around, probably through CoW, so let's treat
1784  * it just like a translation fault and the map handler will clean
1785  * the cache to the PoC.
1786  *
1787  * The MMU notifiers will have unmapped a huge PMD before calling
1788  * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
1789  * therefore we never need to clear out a huge PMD through this
1790  * calling path and a memcache is not required.
1791  */
1792  kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
1793  PAGE_SIZE, __pfn_to_phys(pfn),
1794  KVM_PGTABLE_PROT_R, NULL, 0);
1795 
1796  return false;
1797 }
1798 
1799 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1800 {
1801  u64 size = (range->end - range->start) << PAGE_SHIFT;
1802 
1803  if (!kvm->arch.mmu.pgt)
1804  return false;
1805 
1806  return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1807  range->start << PAGE_SHIFT,
1808  size, true);
1809 }
1810 
1811 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1812 {
1813  u64 size = (range->end - range->start) << PAGE_SHIFT;
1814 
1815  if (!kvm->arch.mmu.pgt)
1816  return false;
1817 
1818  return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1819  range->start << PAGE_SHIFT,
1820  size, false);
1821 }
1822 
1823 phys_addr_t kvm_mmu_get_httbr(void)
1824 {
1825  return __pa(hyp_pgtable->pgd);
1826 }
1827 
1828 phys_addr_t kvm_get_idmap_vector(void)
1829 {
1830  return hyp_idmap_vector;
1831 }
1832 
1833 static int kvm_map_idmap_text(void)
1834 {
1835  unsigned long size = hyp_idmap_end - hyp_idmap_start;
1836  int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
1837  PAGE_HYP_EXEC);
1838  if (err)
1839  kvm_err("Failed to idmap %lx-%lx\n",
1840  hyp_idmap_start, hyp_idmap_end);
1841 
1842  return err;
1843 }
1844 
1845 static void *kvm_hyp_zalloc_page(void *arg)
1846 {
1847  return (void *)get_zeroed_page(GFP_KERNEL);
1848 }
1849 
1850 static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
1851  .zalloc_page = kvm_hyp_zalloc_page,
1852  .get_page = kvm_host_get_page,
1853  .put_page = kvm_host_put_page,
1854  .phys_to_virt = kvm_host_va,
1855  .virt_to_phys = kvm_host_pa,
1856 };
1857 
1858 int __init kvm_mmu_init(u32 *hyp_va_bits)
1859 {
1860  int err;
1861  u32 idmap_bits;
1862  u32 kernel_bits;
1863 
1864  hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
1865  hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
1866  hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
1867  hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
1868  hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
1869 
1870  /*
1871  * We rely on the linker script to ensure at build time that the HYP
1872  * init code does not cross a page boundary.
1873  */
1874  BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1875 
1876  /*
1877  * The ID map may be configured to use an extended virtual address
1878  * range. This is only the case if system RAM is out of range for the
1879  * currently configured page size and VA_BITS_MIN, in which case we will
1880  * also need the extended virtual range for the HYP ID map, or we won't
1881  * be able to enable the EL2 MMU.
1882  *
1883  * However, in some cases the ID map may be configured for fewer than
1884  * the number of VA bits used by the regular kernel stage 1. This
1885  * happens when VA_BITS=52 and the kernel image is placed in PA space
1886  * below 48 bits.
1887  *
1888  * At EL2, there is only one TTBR register, and we can't switch between
1889  * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
1890  * line: we need to use the extended range with *both* our translation
1891  * tables.
1892  *
1893  * So use the maximum of the idmap VA bits and the regular kernel stage
1894  * 1 VA bits to assure that the hypervisor can both ID map its code page
1895  * and map any kernel memory.
1896  */
1897  idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1898  kernel_bits = vabits_actual;
1899  *hyp_va_bits = max(idmap_bits, kernel_bits);
1900 
1901  kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
1902  kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1903  kvm_debug("HYP VA range: %lx:%lx\n",
1904  kern_hyp_va(PAGE_OFFSET),
1905  kern_hyp_va((unsigned long)high_memory - 1));
1906 
1907  if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1908  hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
1909  hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1910  /*
1911  * The idmap page intersects with the HYP VA space;
1912  * it is not safe to continue further.
1913  */
1914  kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1915  err = -EINVAL;
1916  goto out;
1917  }
1918 
1919  hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
1920  if (!hyp_pgtable) {
1921  kvm_err("Hyp mode page-table not allocated\n");
1922  err = -ENOMEM;
1923  goto out;
1924  }
1925 
1926  err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
1927  if (err)
1928  goto out_free_pgtable;
1929 
1930  err = kvm_map_idmap_text();
1931  if (err)
1932  goto out_destroy_pgtable;
1933 
1934  io_map_base = hyp_idmap_start;
1935  return 0;
1936 
1937 out_destroy_pgtable:
1938  kvm_pgtable_hyp_destroy(hyp_pgtable);
1939 out_free_pgtable:
1940  kfree(hyp_pgtable);
1941  hyp_pgtable = NULL;
1942 out:
1943  return err;
1944 }
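
To make the VA-bits reasoning in the comment inside kvm_mmu_init() concrete, here is a worked example with purely illustrative numbers (4KiB pages, an ID map programmed with TCR_EL2.T0SZ = 16, and a kernel running with vabits_actual = 52); none of these values come from mmu.c itself:

#include <linux/minmax.h>

/* Illustrative only: 64 - T0SZ is the VA width covered by the ID map. */
static u32 example_hyp_va_bits(void)
{
	u32 idmap_bits  = 64 - 16;	/* TCR_EL2.T0SZ == 16 -> 48-bit ID map */
	u32 kernel_bits = 52;		/* vabits_actual on a 52-bit VA kernel */

	return max(idmap_bits, kernel_bits);	/* EL2 must use 52-bit VAs */
}

With these values the hypervisor ends up using 52-bit virtual addresses, wide enough both to ID-map its init code and to map any kernel memory through the single TTBR at EL2.
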
1945 
1946 void kvm_arch_commit_memory_region(struct kvm *kvm,
1947  struct kvm_memory_slot *old,
1948  const struct kvm_memory_slot *new,
1949  enum kvm_mr_change change)
1950 {
1951  bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;
1952 
1953  /*
1954  * At this point memslot has been committed and there is an
1955  * allocated dirty_bitmap[], dirty pages will be tracked while the
1956  * memory slot is write protected.
1957  */
1958  if (log_dirty_pages) {
1959 
1960  if (change == KVM_MR_DELETE)
1961  return;
1962 
1963  /*
1964  * Huge and normal pages are write-protected and split
1965  * on either of these two cases:
1966  *
1967  * 1. with initial-all-set: gradually with CLEAR ioctls,
1968  */
1969  if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1970  return;
1971  /*
1972  * or
1973  * 2. without initial-all-set: all in one shot when
1974  * enabling dirty logging.
1975  */
1976  kvm_mmu_wp_memory_region(kvm, new->id);
1977  kvm_mmu_split_memory_region(kvm, new->id);
1978  } else {
1979  /*
1980  * Free any leftovers from the eager page splitting cache. Do
1981  * this when deleting, moving, disabling dirty logging, or
1982  * creating the memslot (a nop). Doing it for deletes makes
1983  * sure we don't leak memory, and there's no need to keep the
1984  * cache around for any of the other cases.
1985  */
1986  kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
1987  }
1988 }
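
From the userspace side, the two write-protection strategies distinguished in the comments above are selected by whether manual dirty-log protection with initial-all-set is enabled before dirty logging is turned on. The sketch below is a minimal illustration, assuming vm_fd is an open KVM VM file descriptor and mem describes an already-created memslot; the helper name enable_dirty_logging is hypothetical:

#include <linux/kvm.h>
#include <stdbool.h>
#include <sys/ioctl.h>

static int enable_dirty_logging(int vm_fd,
				struct kvm_userspace_memory_region *mem,
				bool initially_set)
{
	if (initially_set) {
		/* Opt in to manual protection + initial-all-set first. */
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				   KVM_DIRTY_LOG_INITIALLY_SET,
		};

		if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
			return -1;
	}

	/* Turning this flag on is what reaches kvm_arch_commit_memory_region(). */
	mem->flags |= KVM_MEM_LOG_DIRTY_PAGES;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, mem);
}

With initially_set, kvm_arch_commit_memory_region() returns early and write-protection happens gradually as userspace issues KVM_CLEAR_DIRTY_LOG; otherwise the whole slot is write-protected and split in one shot here.
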
1989 
1990 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1991  const struct kvm_memory_slot *old,
1992  struct kvm_memory_slot *new,
1993  enum kvm_mr_change change)
1994 {
1995  hva_t hva, reg_end;
1996  int ret = 0;
1997 
1998  if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1999  change != KVM_MR_FLAGS_ONLY)
2000  return 0;
2001 
2002  /*
2003  * Prevent userspace from creating a memory region outside of the IPA
2004  * space addressable by the guest.
2005  */
2006  if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))
2007  return -EFAULT;
2008 
2009  hva = new->userspace_addr;
2010  reg_end = hva + (new->npages << PAGE_SHIFT);
2011 
2012  mmap_read_lock(current->mm);
2013  /*
2014  * A memory region could potentially cover multiple VMAs, and any holes
2015  * between them, so iterate over all of them.
2016  *
2017  * +--------------------------------------------+
2018  * +---------------+----------------+ +----------------+
2019  * | : VMA 1 | VMA 2 | | VMA 3 : |
2020  * +---------------+----------------+ +----------------+
2021  * | memory region |
2022  * +--------------------------------------------+
2023  */
2024  do {
2025  struct vm_area_struct *vma;
2026 
2027  vma = find_vma_intersection(current->mm, hva, reg_end);
2028  if (!vma)
2029  break;
2030 
2031  if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) {
2032  ret = -EINVAL;
2033  break;
2034  }
2035 
2036  if (vma->vm_flags & VM_PFNMAP) {
2037  /* IO region dirty page logging not allowed */
2038  if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2039  ret = -EINVAL;
2040  break;
2041  }
2042  }
2043  hva = min(reg_end, vma->vm_end);
2044  } while (hva < reg_end);
2045 
2046  mmap_read_unlock(current->mm);
2047  return ret;
2048 }
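
The IPA-space check at the top of this function is visible to userspace as an EFAULT from KVM_SET_USER_MEMORY_REGION. A minimal sketch, assuming a VM created with a 40-bit guest IPA space (KVM_VM_TYPE_ARM_IPA_SIZE(40)) and a hypothetical helper name:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int register_slot_past_ipa_limit(int vm_fd, void *host_buf)
{
	struct kvm_userspace_memory_region mem = {
		.slot		 = 0,
		.guest_phys_addr = (1ULL << 40) - 0x10000,	/* last 64KiB of IPA space */
		.memory_size	 = 0x20000,			/* ...but the slot is 128KiB */
		.userspace_addr	 = (unsigned long)host_buf,
	};

	/* Fails with errno == EFAULT: base_gfn + npages exceeds the IPA limit. */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
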
2049 
2050 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
2051 {
2052 }
2053 
2054 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2055 {
2056 }
2057 
2058 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2059 {
2060  kvm_uninit_stage2_mmu(kvm);
2061 }
2062 
2063 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2064  struct kvm_memory_slot *slot)
2065 {
2066  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2067  phys_addr_t size = slot->npages << PAGE_SHIFT;
2068 
2069  write_lock(&kvm->mmu_lock);
2070  unmap_stage2_range(&kvm->arch.mmu, gpa, size);
2071  write_unlock(&kvm->mmu_lock);
2072 }
2073 
2074 /*
2075  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2076  *
2077  * Main problems:
2078  * - S/W ops are local to a CPU (not broadcast)
2079  * - We have line migration behind our back (speculation)
2080  * - System caches don't support S/W at all (damn!)
2081  *
2082  * In the face of the above, the best we can do is to try and convert
2083  * S/W ops to VA ops. Because the guest is not allowed to infer the
2084  * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2085  * which is a rather good thing for us.
2086  *
2087  * Also, it is only used when turning caches on/off ("The expected
2088  * usage of the cache maintenance instructions that operate by set/way
2089  * is associated with the cache maintenance instructions associated
2090  * with the powerdown and powerup of caches, if this is required by
2091  * the implementation.").
2092  *
2093  * We use the following policy:
2094  *
2095  * - If we trap a S/W operation, we enable VM trapping to detect
2096  * caches being turned on/off, and do a full clean.
2097  *
2098  * - We flush the caches both when they are turned on and when they are turned off.
2099  *
2100  * - Once the caches are enabled, we stop trapping VM ops.
2101  */
2102 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2103 {
2104  unsigned long hcr = *vcpu_hcr(vcpu);
2105 
2106  /*
2107  * If this is the first time we do a S/W operation
2108  * (i.e. HCR_TVM not set), flush the whole of the guest's
2109  * memory and enable VM register trapping.
2110  *
2111  * Otherwise, rely on the VM trapping to wait for the MMU +
2112  * Caches to be turned off. At that point, we'll be able to
2113  * clean the caches again.
2114  */
2115  if (!(hcr & HCR_TVM)) {
2116  trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2117  vcpu_has_cache_enabled(vcpu));
2118  stage2_flush_vm(vcpu->kvm);
2119  *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2120  }
2121 }
2122 
2123 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2124 {
2125  bool now_enabled = vcpu_has_cache_enabled(vcpu);
2126 
2127  /*
2128  * If switching the MMU+caches on, need to invalidate the caches.
2129  * If switching it off, need to clean the caches.
2130  * Clean + invalidate does the trick always.
2131  */
2132  if (now_enabled != was_enabled)
2133  stage2_flush_vm(vcpu->kvm);
2134 
2135  /* Caches are now on, stop trapping VM ops (until a S/W op) */
2136  if (now_enabled)
2137  *vcpu_hcr(vcpu) &= ~HCR_TVM;
2138 
2139  trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2140 }
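
Putting the two hooks together, the following illustrative trace (not taken from mmu.c) shows the policy for a guest that cleans its caches by set/way around an MMU off/on cycle; the set/way operations trap via HCR_EL2.TSW, and the SCTLR_EL1 writes trap via HCR_EL2.TVM once it has been set:

/*
 * Illustrative trace, assuming the SCTLR_EL1 writes are routed to
 * kvm_toggle_cache() by the sysreg trap code:
 *
 *   guest: DC CISW, ...           -> kvm_set_way_flush(): stage2_flush_vm(),
 *                                    HCR_EL2.TVM is set
 *   guest: SCTLR_EL1.M/C cleared  -> kvm_toggle_cache(was_enabled = true):
 *                                    caches now off -> stage2_flush_vm()
 *   guest: SCTLR_EL1.M/C set      -> kvm_toggle_cache(was_enabled = false):
 *                                    caches back on -> stage2_flush_vm(),
 *                                    HCR_EL2.TVM is cleared again
 */
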