#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
			    struct kvm_enable_cap *cap)
	case KVM_CAP_ARM_NISV_TO_USER:
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
		mutex_lock(&kvm->lock);
		if (!system_supports_mte() || kvm->created_vcpus) {
		set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		mutex_unlock(&kvm->lock);
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		new_cap = cap->args[0];

		mutex_lock(&kvm->slots_lock);
		} else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
			kvm->arch.mmu.split_page_chunk_size = new_cap;
		mutex_unlock(&kvm->slots_lock);
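/*
 * Illustrative userspace sketch (not part of this file; the vm_fd name is
 * hypothetical): a VMM enables eager stage-2 block splitting through the
 * generic capability ioctl. An args[0] of 0 disables splitting; a non-zero
 * value must be one of the block sizes advertised by
 * KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
 *		.args = { 2 * 1024 * 1024 },	// split in 2MiB chunks
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */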
	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
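/*
 * The lock/unlock pair above protects nothing: it runs once at VM
 * creation purely to teach lockdep the intended ordering (kvm->lock taken
 * before kvm->arch.config_lock), so any future inversion of the two
 * mutexes is flagged immediately.
 */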
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		goto err_unshare_kvm;
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
		goto err_free_cpumask;

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
	free_cpumask_var(kvm->arch.supported_cpus);
	return VM_FAULT_SIGBUS;
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	if (is_protected_kvm_enabled())
	kfree(kvm->arch.mpidr_data);
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(),
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
		r = kvm->arch.vgic.msis_require_devid;
	case KVM_CAP_ARM_USER_IRQ:
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
	case KVM_CAP_STEAL_TIME:
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		r = kvm->arch.mmu.split_page_chunk_size;
		r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
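/*
 * Usage sketch (hypothetical fd names): userspace probes these
 * capabilities with KVM_CHECK_EXTENSION; a VM fd is needed whenever the
 * answer is per-VM, as with the IPA size or the current eager-split chunk
 * size computed above.
 *
 *	int ipa_bits = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *			     KVM_CAP_ARM_VM_IPA_SIZE);
 *
 * Zero means "not supported"; positive values carry the
 * capability-specific result assigned to r in the switch above.
 */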
			unsigned int ioctl, unsigned long arg)
	size_t sz = sizeof(struct kvm);

		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
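/*
 * The (elided) branch selecting kzalloc() covers non-VHE configurations,
 * where struct kvm is shared with the nVHE hypervisor and therefore wants
 * a physically contiguous allocation; the __vmalloc() fallback serves the
 * remaining cases. Both paths zero the memory and account it to the
 * caller via GFP_KERNEL_ACCOUNT.
 */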
	if (id >= kvm->max_vcpus)
	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);

	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	vcpu->arch.fp_state = FP_STATE_FREE;
	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
	static_branch_dec(&userspace_irqchip_in_use);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	struct kvm_s2_mmu *mmu;

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
	if (*last_ran != vcpu->vcpu_idx) {
		*last_ran = vcpu->vcpu_idx;

	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
		vcpu_set_wfx_traps(vcpu);

	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);

	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
	vcpu_clear_on_unsupported_cpu(vcpu);
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	spin_lock(&vcpu->arch.mp_state_lock);
	spin_unlock(&vcpu->arch.mp_state_lock);
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
				    struct kvm_mp_state *mp_state)
	*mp_state = READ_ONCE(vcpu->arch.mp_state);
				    struct kvm_mp_state *mp_state)
	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
	case KVM_MP_STATE_STOPPED:
	case KVM_MP_STATE_SUSPENDED:

	spin_unlock(&vcpu->arch.mp_state_lock);
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return vcpu_mode_priv(vcpu);

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
	return *vcpu_pc(vcpu);
	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
	struct kvm_mpidr_data *data = NULL;
	unsigned long c, mask, nr_entries;
	u64 aff_set = 0, aff_clr = ~0UL;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);

	mask = aff_set ^ aff_clr;
	nr_entries = BIT_ULL(hweight_long(mask));

	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),

	data->mpidr_mask = mask;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		u16 index = kvm_mpidr_index(data, aff);

		data->cmpidr_to_idx[index] = c;

	kvm->arch.mpidr_data = data;
	mutex_unlock(&kvm->arch.config_lock);
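/*
 * Worked example for the hashing above (hypothetical values): four vCPUs
 * with MPIDR affinities 0x000, 0x001, 0x100 and 0x101 accumulate
 * aff_set == 0x101 and aff_clr == 0x000, so mask == 0x101 and
 * nr_entries == BIT_ULL(hweight_long(0x101)) == 4. kvm_mpidr_index()
 * compresses the two varying bits into a dense 0..3 index, turning
 * MPIDR-to-vCPU resolution into a single table lookup.
 */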
	struct kvm *kvm = vcpu->kvm;

	if (likely(vcpu_has_run_once(vcpu)))

	if (vcpu_has_nv(vcpu)) {

	if (is_protected_kvm_enabled()) {
		static_branch_inc(&userspace_irqchip_in_use);

	if (kvm_vm_is_protected(kvm))

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	vcpu_set_flag(vcpu, IN_WFI);
	vcpu_clear_flag(vcpu, IN_WFIT);
	vcpu_clear_flag(vcpu, IN_WFI);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))

		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))

		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))

	if (likely(!vcpu_mode_is_32bit(vcpu)))

	if (vcpu_has_nv(vcpu))

	return !kvm_supports_32bit_el0();
	struct kvm_run *run = vcpu->run;

	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			run->exit_reason = KVM_EXIT_INTR;

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();

	return kvm_request_pending(vcpu) ||
	       xfer_to_guest_mode_work_pending();
	guest_state_enter_irqoff();
	guest_state_exit_irqoff();
	struct kvm_run *run = vcpu->run;

	if (run->exit_reason == KVM_EXIT_MMIO) {

	if (run->immediate_exit) {

	run->exit_reason = KVM_EXIT_UNKNOWN;
		ret = xfer_to_guest_mode_handle_work(vcpu);
		__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
		local_irq_disable();
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
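		/*
		 * smp_store_mb() publishes IN_GUEST_MODE with a full barrier
		 * ahead of the final request and signal checks. It pairs with
		 * the read of vcpu->mode in kvm_arch_vcpu_should_kick(): a
		 * remote CPU raising a request after this point observes
		 * IN_GUEST_MODE and sends the kick, while a request raised
		 * before it is caught by the re-check on this side.
		 */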
			vcpu->mode = OUTSIDE_GUEST_MODE;
		if (static_branch_unlikely(&userspace_irqchip_in_use))
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();
		vcpu->mode = OUTSIDE_GUEST_MODE;
		if (static_branch_unlikely(&userspace_irqchip_in_use))
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_disable();
		guest_timing_exit_irqoff();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
	ret = ARM_EXCEPTION_IL;
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
		set = test_and_set_bit(bit_index, hcr);
		set = test_and_clear_bit(bit_index, hcr);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
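/*
 * Userspace-driven IRQ/FIQ lines are modelled directly as the HCR_EL2.VI
 * and HCR_EL2.VF bits: setting a bit makes the guest observe a pending
 * virtual interrupt, clearing it retires the line. The request above
 * kicks the vCPU out of the guest so the updated HCR value is reloaded.
 */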
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_id, irq_num;
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
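	/*
	 * Worked decode for the extraction above, assuming the uapi layout
	 * (type in bits [27:24], vcpu in [23:16], vcpu2 in [31:28], number
	 * in [15:0]): irq == 0x0101000e selects KVM_ARM_IRQ_TYPE_PPI,
	 * vCPU 1, interrupt 14. The vcpu2 field extends the index in units
	 * of KVM_ARM_IRQ_VCPU_MASK + 1 (256) for machines with more than
	 * 256 vCPUs, the extension advertised by
	 * KVM_CAP_ARM_IRQ_LINE_LAYOUT_2.
	 */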
	case KVM_ARM_IRQ_TYPE_CPU:
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
	case KVM_ARM_IRQ_TYPE_PPI:
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
	case KVM_ARM_IRQ_TYPE_SPI:
	unsigned long features = KVM_VCPU_VALID_FEATURES;

	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (!system_supports_sve())
		clear_bit(KVM_ARM_VCPU_SVE, &features);

	if (!system_has_full_ptr_auth()) {
		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);

	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
	unsigned long features = init->features[0];

	if (features & ~KVM_VCPU_VALID_FEATURES)

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))

	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
	    test_bit(KVM_ARM_VCPU_SVE, &features))

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))

	if (kvm_has_mte(vcpu->kvm))

	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
			     KVM_VCPU_MAX_FEATURES);
	struct kvm *kvm = vcpu->kvm;
	unsigned long features = init->features[0];
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&

	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
	mutex_unlock(&kvm->arch.config_lock);
	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
	bool power_off = false;

	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);

	if (vcpu_has_run_once(vcpu)) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			icache_inval_all_pou();

	vcpu_reset_hcr(vcpu);
	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);

	spin_lock(&vcpu->arch.mp_state_lock);
		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
	spin_unlock(&vcpu->arch.mp_state_lock);
				 struct kvm_device_attr *attr)
	switch (attr->group) {
				 struct kvm_device_attr *attr)
	switch (attr->group) {
				 struct kvm_device_attr *attr)
	switch (attr->group) {
				   struct kvm_vcpu_events *events)
	memset(events, 0, sizeof(*events));
				   struct kvm_vcpu_events *events)
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])

	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			 unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;

	case KVM_ARM_VCPU_INIT: {
		if (copy_from_user(&init, argp, sizeof(init)))
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))

		if (ioctl == KVM_SET_ONE_REG)
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))

		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
	case KVM_GET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_to_user(argp, &events, sizeof(events)))
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
	case KVM_ARM_VCPU_FINALIZE: {
		if (get_user(what, (const int __user *)argp))
				      struct kvm_arm_device_addr *dev_addr)
	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
	case KVM_ARM_DEVICE_VGIC_V2:
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
	switch (attr->group) {
	case KVM_ARM_VM_SMCCC_CTRL:
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	case KVM_CREATE_IRQCHIP: {
		mutex_lock(&kvm->lock);
		mutex_unlock(&kvm->lock);
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
	case KVM_ARM_PREFERRED_TARGET: {
			.target = KVM_ARM_TARGET_GENERIC_V8,

		if (copy_to_user(argp, &init, sizeof(init)))
	case KVM_ARM_MTE_COPY_TAGS: {
		struct kvm_arm_copy_mte_tags copy_tags;

		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
	case KVM_ARM_SET_COUNTER_OFFSET: {
		struct kvm_arm_counter_offset offset;

		if (copy_from_user(&offset, argp, sizeof(offset)))
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
	case KVM_ARM_GET_REG_WRITABLE_MASKS: {
		struct reg_mask_range range;

		if (copy_from_user(&range, argp, sizeof(range)))
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	lockdep_assert_held(&kvm->lock);

	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
	struct kvm_vcpu *tmp_vcpu;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
	       (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));

	if (kvm_system_needs_idmapped_vectors() &&
	    !is_protected_kvm_enabled()) {
					       __BP_HARDEN_HYP_VECS_SZ, &base);
	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);

	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

	params->mair_el2 = read_sysreg(mair_el1);

	tcr = read_sysreg(tcr_el1);
	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
		tcr |= TCR_EPD1_MASK;
		tcr &= TCR_EL2_MASK;
		tcr |= TCR_EL2_RES1;
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= TCR_T0SZ(hyp_va_bits);
	tcr &= ~TCR_EL2_PS_MASK;
	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
	if (kvm_lpa2_is_enabled())
	params->tcr_el2 = tcr;
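	/*
	 * The EL2 translation regime is derived from the host's TCR_EL1:
	 * T0SZ is replaced with the hypervisor VA width and PS with the PA
	 * range reported by ID_AA64MMFR0_EL1. Under hVHE (ARM64_KVM_HVHE)
	 * TTBR1 walks are simply disabled via EPD1, instead of applying
	 * the classic nVHE TCR_EL2 mask/RES1 layout.
	 */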
	if (is_protected_kvm_enabled())
		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
	if (cpus_have_final_cap(ARM64_KVM_HVHE))
		params->hcr_el2 |= HCR_E2H;

	params->vttbr = params->vtcr = 0;

	kvm_flush_dcache_to_poc(params, sizeof(*params));
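	/*
	 * The init params are consumed at EL2 before the hyp MMU is up,
	 * i.e. potentially with caches disabled, so they must be cleaned
	 * to the PoC for the hypervisor to read coherent values.
	 */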
	struct kvm_nvhe_init_params *params;
	struct arm_smccc_res res;

	BUG_ON(!system_capabilities_finalized());
	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

	if (this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
		kvm_call_hyp_nvhe(__kvm_enable_ssbs);

	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_protected_kvm_enabled())
		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);

	if (!is_kernel_in_hyp_mode())
	if (is_kernel_in_hyp_mode())
	if (!__this_cpu_read(kvm_hyp_initialized)) {
		__this_cpu_write(kvm_hyp_initialized, 1);
	if (__this_cpu_read(kvm_hyp_initialized)) {
		__this_cpu_write(kvm_hyp_initialized, 0);
	if (!is_protected_kvm_enabled())

#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
	if (__this_cpu_read(kvm_hyp_initialized))
	case CPU_PM_ENTER_FAILED:
		if (__this_cpu_read(kvm_hyp_initialized))

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,

	if (!is_protected_kvm_enabled())
		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
	if (!is_protected_kvm_enabled())
		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
	for_each_online_cpu(cpu)

#define init_psci_0_1_impl_state(config, what) \
	config.psci_0_1_ ## what ## _implemented = psci_ops.what
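/*
 * Token-pasting sketch: init_psci_0_1_impl_state(cfg, cpu_on) expands to
 *
 *	cfg.psci_0_1_cpu_on_implemented = psci_ops.cpu_on
 *
 * recording, for each PSCI 0.1 function, whether the firmware provides
 * it, so the hyp PSCI relay knows which calls it may forward.
 */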
	if (!psci_ops.get_version) {
		kvm_err("Cannot initialize protected mode without PSCI\n");
	kvm_register_perf_callbacks(NULL);

	if (err || !is_protected_kvm_enabled())
	kvm_unregister_perf_callbacks();

	for_each_possible_cpu(cpu) {
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
				num_possible_cpus(), kern_hyp_va(per_cpu_base),
	__this_cpu_write(kvm_hyp_initialized, 1);
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));

	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
	struct kvm_cpu_context *hyp_ctxt;

	for_each_possible_cpu(cpu) {
		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
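/*
 * Each physical CPU's EL2 context is seeded with its own random
 * pointer-authentication keys, so protected-mode hyp code signs and
 * authenticates pointers with keys distinct from the host's. As this
 * runs once at init time, get_random_long() is sufficient here.
 */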
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;

	for_each_possible_cpu(cpu) {
		page_addr = page_address(page);
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
		kvm_err("Cannot map world-switch code\n");
				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
		kvm_err("Cannot map .hyp.rodata section\n");
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
		kvm_err("Cannot map rodata section\n");
				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
		kvm_err("Cannot map hyp bss section: %d\n", err);
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
		kvm_err("Cannot map bss section\n");

	for_each_possible_cpu(cpu) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

			kvm_err("Cannot map hyp stack\n");
		params->stack_pa = __pa(stack_page);

	for_each_possible_cpu(cpu) {
			kvm_err("Cannot map hyp percpu region\n");

	if (is_protected_kvm_enabled()) {
		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
			kvm_err("Failed to init hyp memory protection\n");
	kvm_err("error initializing Hyp mode: %d\n", err);
	struct kvm_vcpu *vcpu;

	mpidr &= MPIDR_HWID_BITMASK;

	if (kvm->arch.mpidr_data) {
		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);

		vcpu = kvm_get_vcpu(kvm,
				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
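/*
 * The lookup above first tries the hash table built by
 * kvm_init_mpidr_data(): one indexed access plus an affinity check. When
 * the table is absent, or the check fails, it falls back to the linear
 * kvm_for_each_vcpu() scan.
 */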
				      struct irq_bypass_producer *prod)
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
				      struct irq_bypass_producer *prod)
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		kvm_info("KVM disabled from command line\n");
		kvm_info("Error initializing system register tables");

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
			 "Only trusted guests should be used on this system.\n");
		kvm_err("Failed to initialize VMID allocator.\n");
		kvm_err("Cannot initialise vector slots\n");

	if (is_protected_kvm_enabled()) {
		kvm_info("Protected nVHE mode initialized successfully\n");
	} else if (in_hyp_mode) {
		kvm_info("VHE mode initialized successfully\n");
		kvm_info("Hyp mode initialized successfully\n");

	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (strcmp(arg, "none") == 0) {

	if (!is_hyp_mode_available()) {
		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");

	if (strcmp(arg, "protected") == 0) {
		if (!is_kernel_in_hyp_mode())
			pr_warn_once("Protected KVM not available with VHE\n");

	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {

	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {