#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
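/*
 * Stage-2 range-walk helpers: step to the next block-size-aligned
 * boundary without walking past the end of the range.
 */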
phys_addr_t boundary = ALIGN_DOWN(addr + size, size);

return (boundary - 1 < end - 1) ? boundary : end;

phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
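/*
 * stage2_apply_range() invokes @fn on block-aligned chunks of the IPA
 * range and, when @resched is set, may drop the write-held mmu_lock
 * between chunks via cond_resched_rwlock_write().
 */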
int (*fn)(struct kvm_pgtable *, u64, u64),

struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);

struct kvm_pgtable *pgt = mmu->pgt;

ret = fn(pgt, addr, next - addr);

if (resched && next != end)
        cond_resched_rwlock_write(&kvm->mmu_lock);
} while (addr = next, addr != end);

#define stage2_apply_range_resched(mmu, addr, end, fn)	\
        stage2_apply_range(mmu, addr, end, fn, true)
if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2)
        n += DIV_ROUND_UP(range, PUD_SIZE);
n += DIV_ROUND_UP(range, PMD_SIZE);
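/*
 * Eager page splitting: the split_page_cache is topped up (dropping the
 * mmu_lock around the allocation) so that kvm_pgtable_stage2_split()
 * does not need to allocate page-table pages while walking the range.
 */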
struct kvm_mmu_memory_cache *cache;

if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))

chunk_size = kvm->arch.mmu.split_page_chunk_size;

cache = &kvm->arch.mmu.split_page_cache;
return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
struct kvm_mmu_memory_cache *cache;
struct kvm_pgtable *pgt;
int ret, cache_capacity;
u64 next, chunk_size;

lockdep_assert_held_write(&kvm->mmu_lock);

chunk_size = kvm->arch.mmu.split_page_chunk_size;

cache = &kvm->arch.mmu.split_page_cache;

write_unlock(&kvm->mmu_lock);

ret = __kvm_mmu_topup_memory_cache(cache,

write_lock(&kvm->mmu_lock);

pgt = kvm->arch.mmu.pgt;

} while (addr = next, addr != end);
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);

gfn_t gfn, u64 nr_pages)

gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);

return !pfn_is_map_memory(pfn);
struct kvm_mmu_memory_cache *mc = arg;

virt = kvm_mmu_memory_cache_alloc(mc);

kvm_account_pgtable_pages(virt, 1);

return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);

kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));

kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
free_pages_exact(virt, size);
struct page *page = container_of(head, struct page, rcu_head);
void *pgtable = page_to_virt(page);
s8 level = page_private(page);

struct page *page = virt_to_page(addr);

set_page_private(page, (unsigned long)level);

get_page(virt_to_page(addr));

put_page(virt_to_page(addr));

struct page *p = virt_to_page(addr);

if (page_count(p) == 1)
        kvm_account_pgtable_pages(addr, -1);

return page_count(virt_to_page(addr));

__clean_dcache_guest_page(va, size);

__invalidate_icache_guest_page(va, size);
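/*
 * Stage-2 unmap/flush paths: callers hold the mmu_lock for write and
 * operate on page-aligned IPA ranges, per memslot or for the whole VM.
 */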
struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
phys_addr_t end = start + size;

lockdep_assert_held_write(&kvm->mmu_lock);
WARN_ON(size & ~PAGE_MASK);

struct kvm_memory_slot *memslot)

phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;

idx = srcu_read_lock(&kvm->srcu);
write_lock(&kvm->mmu_lock);

slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, bkt, slots)

write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
mutex_lock(&kvm_hyp_pgd_mutex);

mutex_unlock(&kvm_hyp_pgd_mutex);

if (is_kernel_in_hyp_mode())

if (static_branch_likely(&kvm_protected_mode_initialized))

unsigned long phys, enum kvm_pgtable_prot prot)

mutex_lock(&kvm_hyp_pgd_mutex);

mutex_unlock(&kvm_hyp_pgd_mutex);

if (!is_vmalloc_addr(kaddr)) {
        BUG_ON(!virt_addr_valid(kaddr));

return page_to_phys(vmalloc_to_page(kaddr)) + offset_in_page(kaddr);
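/*
 * pKVM share tracking: pfns shared with the hypervisor are refcounted
 * in an rb-tree so that only the first share and the last unshare of a
 * given pfn issue the __pkvm_host_share_hyp/__pkvm_host_unshare_hyp
 * hypercalls.
 */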
struct rb_node **parent)

*node = &((**node)->rb_left);
else if (this->pfn > pfn)
        *node = &((**node)->rb_right);

struct rb_node **node, *parent;

mutex_lock(&hyp_shared_pfns_lock);

this = kzalloc(sizeof(*this), GFP_KERNEL);

rb_link_node(&this->node, parent, node);

mutex_unlock(&hyp_shared_pfns_lock);

struct rb_node **node, *parent;

mutex_lock(&hyp_shared_pfns_lock);

if (WARN_ON(!this)) {

mutex_unlock(&hyp_shared_pfns_lock);
phys_addr_t start, end, cur;

if (is_kernel_in_hyp_mode())

if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))

start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));

phys_addr_t start, end, cur;

start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));
phys_addr_t phys_addr;
unsigned long virt_addr;
unsigned long start = kern_hyp_va((unsigned long)from);
unsigned long end = kern_hyp_va((unsigned long)to);

if (is_kernel_in_hyp_mode())

start = start & PAGE_MASK;

for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
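/*
 * Private EL2 VA allocation: ranges for I/O, vector and stack mappings
 * are carved out below io_map_base under kvm_hyp_pgd_mutex; the hyp
 * stack keeps an unmapped guard page below it.
 */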
lockdep_assert_held(&kvm_hyp_pgd_mutex);

if (!PAGE_ALIGNED(base))

mutex_lock(&kvm_hyp_pgd_mutex);

mutex_unlock(&kvm_hyp_pgd_mutex);

unsigned long *haddr,
enum kvm_pgtable_prot prot)

phys_addr, size, prot);
if (IS_ERR_VALUE(addr))

size = PAGE_ALIGN(size + offset_in_page(phys_addr));

*haddr = addr + offset_in_page(phys_addr);

mutex_lock(&kvm_hyp_pgd_mutex);

size = PAGE_SIZE * 2;

mutex_unlock(&kvm_hyp_pgd_mutex);

kvm_err("Cannot allocate hyp stack guard page\n");

kvm_err("Cannot map hyp stack\n");

void __iomem **kaddr,
void __iomem **haddr)

if (is_protected_kvm_enabled())

*kaddr = ioremap(phys_addr, size);

if (is_kernel_in_hyp_mode()) {

&addr, PAGE_HYP_DEVICE);

*haddr = (void __iomem *)addr;

BUG_ON(is_kernel_in_hyp_mode());

&addr, PAGE_HYP_EXEC);

*haddr = (void *)addr;
struct kvm_pgtable pgt = {
        .pgd = (kvm_pteref_t)kvm->mm->pgd,
        .ia_bits = vabits_actual,
        .start_level = (KVM_PGTABLE_LAST_LEVEL - CONFIG_PGTABLE_LEVELS + 1),

local_irq_save(flags);

local_irq_restore(flags);

if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL))

if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL))

if (!kvm_pte_valid(pte))

return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
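/*
 * kvm_init_stage2_mmu(): validate the VM's requested IPA size against
 * the host limit, derive the VTCR from the sanitised ID registers, and
 * allocate the stage-2 page-table plus per-CPU last_vcpu_ran state.
 */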
struct kvm_pgtable *pgt;

if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)

phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
if (is_protected_kvm_enabled()) {

} else if (phys_shift) {

        phys_shift < ARM64_MIN_PARANGE_BITS)

phys_shift = KVM_PHYS_SHIFT;

pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",

mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

if (mmu->pgt != NULL) {
        kvm_err("kvm_arch already initialized?\n");

pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);

mmu->arch = &kvm->arch;

goto out_free_pgtable;

mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
if (!mmu->last_vcpu_ran) {

        goto out_destroy_pgtable;

for_each_possible_cpu(cpu)
        *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
mmu->split_page_cache.gfp_zero = __GFP_ZERO;

mmu->pgd_phys = __pa(pgt->pgd);

kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
struct kvm_memory_slot *memslot)

hva_t hva = memslot->userspace_addr;
phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
phys_addr_t size = PAGE_SIZE * memslot->npages;
hva_t reg_end = hva + size;

struct vm_area_struct *vma;
hva_t vm_start, vm_end;

vma = find_vma_intersection(current->mm, hva, reg_end);

vm_start = max(hva, vma->vm_start);
vm_end = min(reg_end, vma->vm_end);

if (!(vma->vm_flags & VM_PFNMAP)) {
        gpa_t gpa = addr + (vm_start - memslot->userspace_addr);

} while (hva < reg_end);
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;

idx = srcu_read_lock(&kvm->srcu);
mmap_read_lock(current->mm);
write_lock(&kvm->mmu_lock);

slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, bkt, slots)

write_unlock(&kvm->mmu_lock);
mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
struct kvm_pgtable *pgt = NULL;

write_lock(&kvm->mmu_lock);

free_percpu(mmu->last_vcpu_ran);

write_unlock(&kvm->mmu_lock);

free_page((unsigned long)addr);

return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);

if (is_protected_kvm_enabled())

if (!is_protected_kvm_enabled())
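/*
 * kvm_phys_addr_ioremap(): map a host device region into the guest IPA
 * space page by page with device attributes, topping up the page-table
 * cache before installing each mapping under the mmu_lock.
 */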
phys_addr_t pa, unsigned long size, bool writable)

struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
struct kvm_pgtable *pgt = mmu->pgt;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                             KVM_PGTABLE_PROT_R |
                             (writable ? KVM_PGTABLE_PROT_W : 0);

if (is_protected_kvm_enabled())

size += offset_in_page(guest_ipa);
guest_ipa &= PAGE_MASK;

for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
        ret = kvm_mmu_topup_memory_cache(&cache, kvm_mmu_cache_min_pages(mmu));

write_lock(&kvm->mmu_lock);

write_unlock(&kvm->mmu_lock);

kvm_mmu_free_memory_cache(&cache);
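/*
 * Dirty logging: enabling logging write-protects (and, with eager
 * splitting, breaks up) the whole memslot; log clearing then
 * write-protects only the GFNs selected by the dirty-bitmap mask.
 */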
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
phys_addr_t start, end;

if (WARN_ON_ONCE(!memslot))

start = memslot->base_gfn << PAGE_SHIFT;
end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

write_lock(&kvm->mmu_lock);

write_unlock(&kvm->mmu_lock);

struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
phys_addr_t start, end;

lockdep_assert_held(&kvm->slots_lock);

slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, slot);

start = memslot->base_gfn << PAGE_SHIFT;
end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

write_lock(&kvm->mmu_lock);

write_unlock(&kvm->mmu_lock);

struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)

phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

lockdep_assert_held_write(&kvm->mmu_lock);
if (kvm_dirty_log_manual_protect_and_init_set(kvm))

send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);

unsigned long map_size)

hva_t uaddr_start, uaddr_end;

if (map_size == PAGE_SIZE)

size = memslot->npages * PAGE_SIZE;

gpa_start = memslot->base_gfn << PAGE_SHIFT;

uaddr_start = memslot->userspace_addr;
uaddr_end = uaddr_start + size;

if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))

return (hva & ~(map_size - 1)) >= uaddr_start &&
       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
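/*
 * Huge-page heuristics: get_vma_page_shift() checks whether the host
 * VMA (hugetlb or PFNMAP) is aligned well enough for a PUD/PMD block,
 * and transparent_hugepage_adjust() backs a PMD-sized THP with a block
 * mapping by rounding the pfn and IPA down to the PMD boundary.
 */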
unsigned long hva, kvm_pfn_t *pfnp,

kvm_pfn_t pfn = *pfnp;

pfn &= ~(PTRS_PER_PMD - 1);

if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
        return huge_page_shift(hstate_vma(vma));

if (!(vma->vm_flags & VM_PFNMAP))

VM_BUG_ON(is_vm_hugetlb_page(vma));

pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);

#ifndef __PAGETABLE_PMD_FOLDED
if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
    ALIGN(hva, PUD_SIZE) <= vma->vm_end)

if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
unsigned long i, nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);

if (!kvm_has_mte(kvm))

for (i = 0; i < nr_pages; i++, page++) {
        if (try_page_mte_tagging(page)) {
                mte_clear_page_tags(page_address(page));
                set_page_mte_tagged(page);

return vma->vm_flags & VM_MTE_ALLOWED;
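/*
 * user_mem_abort(): resolve a stage-2 translation or permission fault
 * by faulting in the host page, choosing the largest safe mapping
 * granule, and installing it with the appropriate R/W/X protections.
 */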
struct kvm_memory_slot *memslot, unsigned long hva,

bool write_fault, writable, force_pte = false;
bool exec_fault, mte_allowed;
bool device = false;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;

long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;

fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
write_fault = kvm_is_write_fault(vcpu);
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);

if (fault_is_perm && !write_fault && !exec_fault) {
        kvm_err("Unexpected L2 read permission error\n");

if (!fault_is_perm || (logging_active && write_fault)) {
        ret = kvm_mmu_topup_memory_cache(memcache,
                                         kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));

mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, hva);
if (unlikely(!vma)) {
        kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
        mmap_read_unlock(current->mm);
if (logging_active) {

        vma_shift = PAGE_SHIFT;

switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED

case CONT_PMD_SHIFT:
        vma_shift = PMD_SHIFT;

case CONT_PTE_SHIFT:
        vma_shift = PAGE_SHIFT;

WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);

vma_pagesize = 1UL << vma_shift;
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
        fault_ipa &= ~(vma_pagesize - 1);

gfn = fault_ipa >> PAGE_SHIFT;

mmu_seq = vcpu->kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);

write_fault, &writable, NULL);
if (pfn == KVM_PFN_ERR_HWPOISON) {

if (is_error_noslot_pfn(pfn))

} else if (logging_active && !write_fault) {
if (exec_fault && device)

read_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, mmu_seq))

if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
        if (fault_is_perm && fault_granule > PAGE_SIZE)
                vma_pagesize = fault_granule;

if (vma_pagesize < 0) {

if (!fault_is_perm && !device && kvm_has_mte(kvm)) {

prot |= KVM_PGTABLE_PROT_W;

prot |= KVM_PGTABLE_PROT_X;

prot |= KVM_PGTABLE_PROT_DEVICE;
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
        prot |= KVM_PGTABLE_PROT_X;

if (fault_is_perm && vma_pagesize == fault_granule)

__pfn_to_phys(pfn), prot,

KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED);

if (writable && !ret) {

read_unlock(&kvm->mmu_lock);

return ret != -EAGAIN ? ret : 0;
struct kvm_s2_mmu *mmu;

trace_kvm_access_fault(fault_ipa);

read_lock(&vcpu->kvm->mmu_lock);
mmu = vcpu->arch.hw_mmu;

read_unlock(&vcpu->kvm->mmu_lock);

if (kvm_pte_valid(pte))
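/*
 * kvm_handle_guest_abort(): entry point for guest stage-2 aborts.
 * External aborts are handled as SEA, accesses outside any memslot go
 * to io_mem_abort(), access-flag faults go to handle_access_fault(),
 * and everything else is resolved by user_mem_abort().
 */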
phys_addr_t fault_ipa;
struct kvm_memory_slot *memslot;

bool is_iabt, write_fault, writable;

esr = kvm_vcpu_get_esr(vcpu);

fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

if (esr_fsc_is_translation_fault(esr)) {

        if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

if (kvm_vcpu_abt_issea(vcpu)) {

        if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))

trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
                      kvm_vcpu_get_hfar(vcpu), fault_ipa);

if (!esr_fsc_is_translation_fault(esr) &&
    !esr_fsc_is_permission_fault(esr) &&
    !esr_fsc_is_access_flag_fault(esr)) {
        kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                kvm_vcpu_trap_get_class(vcpu),
                (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
                (unsigned long)kvm_vcpu_get_esr(vcpu));
idx = srcu_read_lock(&vcpu->kvm->srcu);

gfn = fault_ipa >> PAGE_SHIFT;

write_fault = kvm_is_write_fault(vcpu);
if (kvm_is_error_hva(hva) || (write_fault && !writable)) {

        if (kvm_vcpu_abt_iss1tw(vcpu)) {

        if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {

        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);

VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));

if (esr_fsc_is_access_flag_fault(esr)) {

esr_fsc_is_permission_fault(esr));

if (ret == -ENOEXEC) {

srcu_read_unlock(&vcpu->kvm->srcu, idx);
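/*
 * MMU notifier callbacks: unmap, change-pte, age and test-age requests
 * from the host MMU are translated into the corresponding stage-2
 * page-table operations.
 */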
if (!kvm->arch.mmu.pgt)

(range->end - range->start) << PAGE_SHIFT,

kvm_pfn_t pfn = pte_pfn(range->arg.pte);

if (!kvm->arch.mmu.pgt)

WARN_ON(range->end - range->start != 1);

if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))

PAGE_SIZE, __pfn_to_phys(pfn), KVM_PGTABLE_PROT_R, NULL, 0);

u64 size = (range->end - range->start) << PAGE_SHIFT;

if (!kvm->arch.mmu.pgt)

range->start << PAGE_SHIFT,

u64 size = (range->end - range->start) << PAGE_SHIFT;

if (!kvm->arch.mmu.pgt)

range->start << PAGE_SHIFT,
kvm_err("Failed to idmap %lx-%lx\n",

return (void *)get_zeroed_page(GFP_KERNEL);

idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
kernel_bits = vabits_actual;
*hyp_va_bits = max(idmap_bits, kernel_bits);

kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);

kvm_debug("HYP VA range: %lx:%lx\n",
          kern_hyp_va(PAGE_OFFSET),
          kern_hyp_va((unsigned long)high_memory - 1));

kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");

kvm_err("Hyp mode page-table not allocated\n");

goto out_free_pgtable;

goto out_destroy_pgtable;

out_destroy_pgtable:
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)

bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;

if (log_dirty_pages) {

        if (change == KVM_MR_DELETE)

        if (kvm_dirty_log_manual_protect_and_init_set(kvm))

kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)

if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
    change != KVM_MR_FLAGS_ONLY)

if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))

hva = new->userspace_addr;
reg_end = hva + (new->npages << PAGE_SHIFT);

mmap_read_lock(current->mm);

struct vm_area_struct *vma;

vma = find_vma_intersection(current->mm, hva, reg_end);

if (vma->vm_flags & VM_PFNMAP) {

        if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {

hva = min(reg_end, vma->vm_end);
} while (hva < reg_end);

mmap_read_unlock(current->mm);
struct kvm_memory_slot *slot)

gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
phys_addr_t size = slot->npages << PAGE_SHIFT;

write_lock(&kvm->mmu_lock);

write_unlock(&kvm->mmu_lock);

unsigned long hcr = *vcpu_hcr(vcpu);

if (!(hcr & HCR_TVM)) {
        trace_kvm_set_way_flush(*vcpu_pc(vcpu), vcpu_has_cache_enabled(vcpu));

        *vcpu_hcr(vcpu) = hcr | HCR_TVM;

bool now_enabled = vcpu_has_cache_enabled(vcpu);

if (now_enabled != was_enabled)

*vcpu_hcr(vcpu) &= ~HCR_TVM;

trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
static unsigned long base
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva)
struct kvm_memory_slot * gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot)
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable)
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)
void kvm_release_pfn_clean(kvm_pfn_t pfn)
int __pkvm_host_unshare_hyp(u64 pfn)
int __pkvm_host_share_hyp(u64 pfn)
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot, unsigned long *haddr)
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
static void * stage2_memcache_zalloc_page(void *arg)
phys_addr_t kvm_mmu_get_httbr(void)
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, void **haddr)
int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change)
static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
int kvm_share_hyp(void *from, void *to)
static bool kvm_is_device_pfn(unsigned long pfn)
static int kvm_map_idmap_text(void)
static int kvm_host_page_count(void *addr)
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
static void * kvm_host_va(phys_addr_t phys)
static unsigned long __ro_after_init hyp_idmap_start
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
phys_addr_t kvm_get_idmap_vector(void)
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
static void stage2_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static void stage2_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
static DEFINE_MUTEX(kvm_hyp_pgd_mutex)
#define stage2_apply_range_resched(mmu, addr, end, fn)
static int __hyp_alloc_private_va_range(unsigned long base)
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm)
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, unsigned long size)
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
static struct rb_root hyp_shared_pfns
int __create_hyp_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot)
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, bool may_block)
static struct kvm_pgtable * hyp_pgtable
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
static phys_addr_t __ro_after_init hyp_idmap_vector
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
static int kvm_mmu_split_nr_page_tables(u64 range)
static phys_addr_t kvm_host_pa(void *addr)
static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops
static long transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
static void kvm_host_get_page(void *addr)
static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops
static void clean_dcache_guest_page(void *va, size_t size)
static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end, phys_addr_t size)
int __init kvm_mmu_init(u32 *hyp_va_bits)
static unsigned long __ro_after_init io_map_base
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end, int(*fn)(struct kvm_pgtable *, u64, u64), bool resched)
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
static void * hyp_mc_alloc_fn(void *unused)
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
static int share_pfn_hyp(u64 pfn)
static void * kvm_s2_zalloc_pages_exact(size_t size)
static void invalidate_icache_guest_page(void *va, size_t size)
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
static void * kvm_hyp_zalloc_page(void *arg)
static int unshare_pfn_hyp(u64 pfn)
static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size)
void kvm_unshare_hyp(void *from, void *to)
static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
void stage2_unmap_vm(struct kvm *kvm)
static void kvm_s2_put_page(void *addr)
static void hyp_mc_free_fn(void *addr, void *unused)
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, phys_addr_t pa, unsigned long size, bool writable)
static void kvm_s2_free_pages_exact(void *virt, size_t size)
static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
void kvm_uninit_stage2_mmu(struct kvm *kvm)
void __init free_hyp_pgds(void)
static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
static void * kvm_host_zalloc_pages_exact(size_t size)
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
static struct hyp_shared_pfn * find_shared_pfn(u64 pfn, struct rb_node ***node, struct rb_node **parent)
static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, unsigned long *haddr, enum kvm_pgtable_prot prot)
static void stage2_flush_vm(struct kvm *kvm)
static void stage2_free_unlinked_table(void *addr, s8 level)
void free_hyp_memcache(struct kvm_hyp_memcache *mc)
int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, void __iomem **haddr)
static bool kvm_host_owns_hyp_mappings(void)
static struct kvm_pgtable_mm_ops kvm_user_mm_ops
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
static unsigned long __ro_after_init hyp_idmap_end
void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change)
static void kvm_host_put_page(void *addr)
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot)
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, size_t size)
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, struct kvm_pgtable_mm_ops *mm_ops)
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, void *mc, enum kvm_pgtable_walk_flags flags)
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot)
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_mmu_memory_cache *mc)
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold)
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, kvm_pte_t *ptep, s8 *level)
u32 get_kvm_ipa_limit(void)
static u32 __ro_after_init kvm_ipa_limit
struct vgic_global kvm_vgic_global_state __ro_after_init