#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

#define ITOA_MAX_LEN 12
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure, adopt a
 * double line of defense: prevent a compat task from opening /dev/kvm, and
 * if the open was done by a compat task anyway, deny further ioctls.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
167 if (WARN_ON_ONCE(!page_count(page)))
170 return is_zone_device_page(page);
186 page = pfn_to_page(pfn);
187 if (!PageReserved(page))
191 if (is_zero_pfn(pfn))
212 __this_cpu_write(kvm_running_vcpu, vcpu);
213 preempt_notifier_register(&vcpu->preempt_notifier);
223 preempt_notifier_unregister(&vcpu->preempt_notifier);
224 __this_cpu_write(kvm_running_vcpu, NULL);
232 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
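	/*
	 * A request tagged with KVM_REQUEST_WAIT must be acknowledged before
	 * the vCPU re-enters the guest, so a kick is needed unless the vCPU
	 * is fully outside guest mode; plain requests only need a kick if
	 * the vCPU is currently running in guest mode.
	 */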
238 if (req & KVM_REQUEST_WAIT)
239 return mode != OUTSIDE_GUEST_MODE;
244 return mode == IN_GUEST_MODE;
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
				  struct cpumask *tmp, int current_cpu)
{
	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);
282 cpu = READ_ONCE(vcpu->cpu);
283 if (cpu != -1 && cpu != current_cpu)
284 __cpumask_set_cpu(cpu, tmp);
289 unsigned long *vcpu_bitmap)
291 struct kvm_vcpu *vcpu;
292 struct cpumask *cpus;
298 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
301 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
302 vcpu = kvm_get_vcpu(kvm, i);
315 struct kvm_vcpu *except)
317 struct kvm_vcpu *vcpu;
318 struct cpumask *cpus;
325 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
328 kvm_for_each_vcpu(i, vcpu, kvm) {
348 ++kvm->stat.generic.remote_tlb_flush_requests;
363 ++kvm->stat.generic.remote_tlb_flush;
381 const struct kvm_memory_slot *memslot)
390 lockdep_assert_held(&kvm->slots_lock);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}
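/*
 * Top up an MMU memory cache to at least @min objects so that later
 * allocations made in contexts that cannot sleep (e.g. while holding
 * mmu_lock) can be served from the pre-filled array instead of going to
 * the page allocator.
 */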
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
	kvfree(mc->objects);
	mc->objects = NULL;
}
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	return p;
}
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
484 mutex_init(&vcpu->mutex);
489 #ifndef __KVM_HAVE_ARCH_WQP
490 rcuwait_init(&vcpu->wait);
	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->last_used_slot = NULL;
	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
		 task_pid_nr(current), id);
	put_pid(rcu_dereference_protected(vcpu->pid, 1));
	free_page((unsigned long)vcpu->run);
525 struct kvm_vcpu *vcpu;
527 kvm_for_each_vcpu(i, vcpu, kvm) {
529 xa_erase(&kvm->vcpu_array, i);
532 atomic_set(&kvm->online_vcpus, 0);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm);
546 struct kvm_mmu_notifier_range {
553 union kvm_mmu_notifier_arg arg;
554 gfn_handler_t handler;
555 on_lock_fn_t on_lock;
568 typedef struct kvm_mmu_notifier_return {
static void kvm_null_fn(void)
{
}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
586 static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
589 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
590 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
592 node = interval_tree_iter_next(node, start, last)) \
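/*
 * Walk every memslot whose userspace address range overlaps [start, end),
 * convert the overlapping HVAs to a gfn range and invoke the handler on it.
 * The mmu_lock is taken (and the on_lock callback run) the first time an
 * overlapping memslot is found, and a TLB flush is issued afterwards if the
 * range asked for one and any handler reported a change.
 */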
static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
							    const struct kvm_mmu_notifier_range *range)
{
	struct kvm_mmu_notifier_return r = {
		.found_memslot = false,
	};
601 struct kvm_gfn_range gfn_range;
602 struct kvm_memory_slot *slot;
603 struct kvm_memslots *slots;
606 if (WARN_ON_ONCE(range->end <= range->start))
610 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
611 IS_KVM_NULL_FN(range->handler)))
614 idx = srcu_read_lock(&kvm->srcu);
616 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
617 struct interval_tree_node *node;
619 slots = __kvm_memslots(kvm, i);
620 kvm_for_each_memslot_in_hva_range(node, slots,
621 range->start, range->end - 1) {
622 unsigned long hva_start, hva_end;
			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
			hva_end = min_t(unsigned long, range->end,
					slot->userspace_addr + (slot->npages << PAGE_SHIFT));
635 gfn_range.arg = range->arg;
636 gfn_range.may_block = range->may_block;
642 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
643 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
644 gfn_range.slot = slot;
			if (!r.found_memslot) {
				r.found_memslot = true;
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			r.ret |= range->handler(kvm, &gfn_range);
659 if (range->flush_on_ret && r.ret)
665 srcu_read_unlock(&kvm->srcu, idx);
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						union kvm_mmu_notifier_arg arg,
						gfn_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.arg		= arg,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range).ret;
}
static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							  unsigned long start,
							  unsigned long end,
							  gfn_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range).ret;
}
static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));

	if (range->slot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return kvm_set_spte_gfn(kvm, range);
}
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const union kvm_mmu_notifier_arg arg = { .pte = pte };
733 trace_kvm_set_spte_hva(address);
743 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
744 if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
747 kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
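/*
 * kvm_mmu_invalidate_begin/range_add/end bracket an mmu_notifier
 * invalidation: begin bumps mmu_invalidate_in_progress, range_add records
 * the affected gfn range, and end bumps mmu_invalidate_seq so that page
 * fault handlers can detect that a concurrent invalidation raced with them
 * and must retry.
 */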
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
752 lockdep_assert_held_write(&kvm->mmu_lock);
758 kvm->mmu_invalidate_in_progress++;
760 if (likely(kvm->mmu_invalidate_in_progress == 1)) {
761 kvm->mmu_invalidate_range_start = INVALID_GPA;
762 kvm->mmu_invalidate_range_end = INVALID_GPA;
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
768 lockdep_assert_held_write(&kvm->mmu_lock);
770 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
	return kvm_unmap_gfn_range(kvm, range);
}
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						   const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= kvm_mmu_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};
811 trace_kvm_unmap_hva_range(range->start, range->end);
821 spin_lock(&kvm->mn_invalidate_lock);
822 kvm->mn_active_invalidate_count++;
823 spin_unlock(&kvm->mn_invalidate_lock);
836 hva_range.may_block);
843 if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
void kvm_mmu_invalidate_end(struct kvm *kvm)
{
851 lockdep_assert_held_write(&kvm->mmu_lock);
858 kvm->mmu_invalidate_seq++;
865 kvm->mmu_invalidate_in_progress--;
866 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
872 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_mmu_invalidate_end,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
889 __kvm_handle_hva_range(kvm, &hva_range);
892 spin_lock(&kvm->mn_invalidate_lock);
893 wake = (--kvm->mn_active_invalidate_count == 0);
894 spin_unlock(&kvm->mn_invalidate_lock);
901 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
				    kvm_age_gfn);
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
954 idx = srcu_read_lock(&kvm->srcu);
956 srcu_read_unlock(&kvm->srcu, idx);
959 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
960 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
961 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
962 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
963 .clear_young = kvm_mmu_notifier_clear_young,
964 .test_young = kvm_mmu_notifier_test_young,
965 .change_pte = kvm_mmu_notifier_change_pte,
966 .release = kvm_mmu_notifier_release,
971 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
972 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
984 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}
996 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
998 kvm->pm_notifier.priority = INT_MAX;
999 register_pm_notifier(&kvm->pm_notifier);
1004 unregister_pm_notifier(&kvm->pm_notifier);
1018 if (!memslot->dirty_bitmap)
1021 kvfree(memslot->dirty_bitmap);
1022 memslot->dirty_bitmap = NULL;
1028 if (slot->flags & KVM_MEM_GUEST_MEMFD)
1040 struct hlist_node *idnode;
1041 struct kvm_memory_slot *memslot;
1050 if (!slots->node_idx)
1053 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1059 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1060 case KVM_STATS_TYPE_INSTANT:
1062 case KVM_STATS_TYPE_CUMULATIVE:
1063 case KVM_STATS_TYPE_PEAK:
1076 if (IS_ERR(kvm->debugfs_dentry))
1079 debugfs_remove_recursive(kvm->debugfs_dentry);
1081 if (kvm->debugfs_stat_data) {
1082 for (i = 0; i < kvm_debugfs_num_entries; i++)
1083 kfree(kvm->debugfs_stat_data[i]);
1084 kfree(kvm->debugfs_stat_data);
1091 struct dentry *dent;
1093 struct kvm_stat_data *stat_data;
1094 const struct _kvm_stats_desc *pdesc;
1095 int i, ret = -ENOMEM;
1099 if (!debugfs_initialized())
	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
	mutex_lock(&kvm_debugfs_lock);
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1108 mutex_unlock(&kvm_debugfs_lock);
1112 mutex_unlock(&kvm_debugfs_lock);
1116 kvm->debugfs_dentry = dent;
1117 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1118 sizeof(*kvm->debugfs_stat_data),
1119 GFP_KERNEL_ACCOUNT);
1120 if (!kvm->debugfs_stat_data)
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1129 stat_data->kvm = kvm;
1130 stat_data->desc = pdesc;
1131 stat_data->kind = KVM_STAT_VM;
1132 kvm->debugfs_stat_data[i] = stat_data;
1134 kvm->debugfs_dentry, stat_data,
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1144 stat_data->kvm = kvm;
1145 stat_data->desc = pdesc;
1146 stat_data->kind = KVM_STAT_VCPU;
1149 kvm->debugfs_dentry, stat_data,
1194 struct kvm_memslots *slots;
1199 return ERR_PTR(-ENOMEM);
1202 mmgrab(current->mm);
1203 kvm->mm = current->mm;
1205 mutex_init(&kvm->lock);
1206 mutex_init(&kvm->irq_lock);
1207 mutex_init(&kvm->slots_lock);
1208 mutex_init(&kvm->slots_arch_lock);
1209 spin_lock_init(&kvm->mn_invalidate_lock);
1210 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1211 xa_init(&kvm->vcpu_array);
1212 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1213 xa_init(&kvm->mem_attr_array);
1216 INIT_LIST_HEAD(&kvm->gpc_list);
1217 spin_lock_init(&kvm->gpc_lock);
1219 INIT_LIST_HEAD(&kvm->devices);
1220 kvm->max_vcpus = KVM_MAX_VCPUS;
1222 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1228 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
		 task_pid_nr(current));
1233 if (init_srcu_struct(&kvm->srcu))
1234 goto out_err_no_srcu;
1235 if (init_srcu_struct(&kvm->irq_srcu))
1236 goto out_err_no_irq_srcu;
1238 refcount_set(&kvm->users_count, 1);
1239 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1240 for (j = 0; j < 2; j++) {
1241 slots = &kvm->__memslots[i][j];
			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1244 slots->hva_tree = RB_ROOT_CACHED;
1245 slots->gfn_tree = RB_ROOT;
1246 hash_init(slots->id_hash);
1247 slots->node_idx = j;
1250 slots->generation = i;
1253 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1256 for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1260 goto out_err_no_arch_destroy_vm;
1265 goto out_err_no_arch_destroy_vm;
1269 goto out_err_no_disable;
1271 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1272 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1277 goto out_err_no_mmu_notifier;
1281 goto out_no_coalesced_mmio;
1285 goto out_err_no_debugfs;
1291 mutex_lock(&kvm_lock);
1292 list_add(&kvm->vm_list, &vm_list);
1293 mutex_unlock(&kvm_lock);
1295 preempt_notifier_inc();
1304 out_no_coalesced_mmio:
1305 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1306 if (kvm->mmu_notifier.ops)
1307 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1309 out_err_no_mmu_notifier:
1313 out_err_no_arch_destroy_vm:
1314 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1315 for (i = 0; i < KVM_NR_BUSES; i++)
1316 kfree(kvm_get_bus(kvm, i));
1317 cleanup_srcu_struct(&kvm->irq_srcu);
1318 out_err_no_irq_srcu:
1319 cleanup_srcu_struct(&kvm->srcu);
1321 kvm_arch_free_vm(kvm);
1322 mmdrop(current->mm);
1328 struct kvm_device *dev, *tmp;
1335 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1336 list_del(&dev->vm_node);
1337 dev->ops->destroy(dev);
1344 struct mm_struct *mm = kvm->mm;
1349 kvm_arch_sync_events(kvm);
1350 mutex_lock(&kvm_lock);
1351 list_del(&kvm->vm_list);
1352 mutex_unlock(&kvm_lock);
1356 for (i = 0; i < KVM_NR_BUSES; i++) {
1357 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1361 kvm->buses[i] = NULL;
1364 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1365 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1378 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1379 if (kvm->mn_active_invalidate_count)
1380 kvm->mn_active_invalidate_count = 0;
1382 WARN_ON(kvm->mmu_invalidate_in_progress);
1388 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1392 cleanup_srcu_struct(&kvm->irq_srcu);
1393 cleanup_srcu_struct(&kvm->srcu);
1394 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1395 xa_destroy(&kvm->mem_attr_array);
1397 kvm_arch_free_vm(kvm);
1398 preempt_notifier_dec();
1405 refcount_inc(&kvm->users_count);
1415 return refcount_inc_not_zero(&kvm->users_count);
1421 if (refcount_dec_and_test(&kvm->users_count))
1435 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1441 struct kvm *kvm = filp->private_data;
1443 kvm_irqfd_release(kvm);
1455 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1457 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1458 if (!memslot->dirty_bitmap)
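/*
 * Each address space has two struct kvm_memslots instances: one is visible
 * to readers via RCU while the other is being updated.  node_idx flips
 * between 0 and 1 so a memslot can sit in both sets' trees and hash tables
 * at once; the inactive set is simply the one whose node_idx differs from
 * the currently active set.
 */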
1466 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1467 int node_idx_inactive = active->node_idx ^ 1;
1469 return &kvm->__memslots[as_id][node_idx_inactive];
1478 struct kvm_memory_slot *b)
1480 if (WARN_ON_ONCE(!a && !b))
1488 WARN_ON_ONCE(a->as_id != b->as_id);
1493 struct kvm_memory_slot *slot)
1495 struct rb_root *gfn_tree = &slots->gfn_tree;
1496 struct rb_node **node, *parent;
1497 int idx = slots->node_idx;
1500 for (node = &gfn_tree->rb_node; *node; ) {
1501 struct kvm_memory_slot *tmp;
		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1505 if (slot->base_gfn < tmp->base_gfn)
1506 node = &(*node)->rb_left;
1507 else if (slot->base_gfn > tmp->base_gfn)
1508 node = &(*node)->rb_right;
1513 rb_link_node(&slot->gfn_node[idx], parent, node);
1514 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1518 struct kvm_memory_slot *slot)
1520 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
				      struct kvm_memory_slot *old,
				      struct kvm_memory_slot *new)
1527 int idx = slots->node_idx;
1529 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1531 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
1550 int idx = slots->node_idx;
1553 hash_del(&old->id_node[idx]);
1554 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1588 if (old && old->base_gfn == new->base_gfn) {
1602 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1603 (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1606 const struct kvm_userspace_memory_region2 *mem)
1608 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1610 if (kvm_arch_has_private_mem(kvm))
1611 valid_flags |= KVM_MEM_GUEST_MEMFD;
1614 if (mem->flags & KVM_MEM_GUEST_MEMFD)
1615 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1617 #ifdef __KVM_HAVE_READONLY_MEM
1623 if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
1624 valid_flags |= KVM_MEM_READONLY;
1627 if (mem->flags & ~valid_flags)
1638 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1640 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1641 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1648 spin_lock(&kvm->mn_invalidate_lock);
1649 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1650 while (kvm->mn_active_invalidate_count) {
1651 set_current_state(TASK_UNINTERRUPTIBLE);
1652 spin_unlock(&kvm->mn_invalidate_lock);
1654 spin_lock(&kvm->mn_invalidate_lock);
1656 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1657 rcu_assign_pointer(kvm->memslots[as_id], slots);
1658 spin_unlock(&kvm->mn_invalidate_lock);
1665 mutex_unlock(&kvm->slots_arch_lock);
1667 synchronize_srcu_expedited(&kvm->srcu);
1675 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
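	/*
	 * Memslot generations are bumped by the number of address spaces so
	 * that a generation value uniquely identifies both the address space
	 * and the update; stale cached translations tagged with an older
	 * generation are then guaranteed to miss.
	 */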
1684 gen += kvm_arch_nr_memslot_as_ids(kvm);
1688 slots->generation = gen;
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
1705 if (change != KVM_MR_DELETE) {
1706 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1707 new->dirty_bitmap = NULL;
1708 else if (old && old->dirty_bitmap)
1709 new->dirty_bitmap = old->dirty_bitmap;
1715 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1716 bitmap_set(new->dirty_bitmap, 0, new->npages);
	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
		kvm_destroy_dirty_bitmap(new);
				    struct kvm_memory_slot *old,
				    const struct kvm_memory_slot *new,
				    enum kvm_mr_change change)
{
	int old_flags = old ? old->flags : 0;
	int new_flags = new ? new->flags : 0;
1740 if (change == KVM_MR_DELETE)
1741 kvm->nr_memslot_pages -= old->npages;
	else if (change == KVM_MR_CREATE)
		kvm->nr_memslot_pages += new->npages;
1745 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1746 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1747 atomic_set(&kvm->nr_memslots_dirty_logging,
1748 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1762 case KVM_MR_FLAGS_ONLY:
1767 if (old->dirty_bitmap && !new->dirty_bitmap)
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new)
1804 const struct kvm_memory_slot *src)
1806 dest->base_gfn = src->base_gfn;
1807 dest->npages = src->npages;
1808 dest->dirty_bitmap = src->dirty_bitmap;
1809 dest->arch = src->arch;
1810 dest->userspace_addr = src->userspace_addr;
1811 dest->flags = src->flags;
1813 dest->as_id = src->as_id;
1817 struct kvm_memory_slot *old,
1818 struct kvm_memory_slot *invalid_slot)
1826 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1846 mutex_lock(&kvm->slots_arch_lock);
1855 old->arch = invalid_slot->arch;
				      struct kvm_memory_slot *new)
1867 struct kvm_memory_slot *old,
1868 struct kvm_memory_slot *invalid_slot)
				       struct kvm_memory_slot *old,
				       struct kvm_memory_slot *new,
				       struct kvm_memory_slot *invalid_slot)
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
			    struct kvm_memory_slot *old,
			    struct kvm_memory_slot *new,
			    enum kvm_mr_change change)
1909 struct kvm_memory_slot *invalid_slot;
1926 mutex_lock(&kvm->slots_arch_lock);
1941 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1943 if (!invalid_slot) {
1944 mutex_unlock(&kvm->slots_arch_lock);
1958 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1960 kfree(invalid_slot);
1962 mutex_unlock(&kvm->slots_arch_lock);
1974 if (change == KVM_MR_CREATE)
1976 else if (change == KVM_MR_DELETE)
1978 else if (change == KVM_MR_MOVE)
1980 else if (change == KVM_MR_FLAGS_ONLY)
1986 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1987 kfree(invalid_slot);
				      gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		if (iter.slot->id != id)
			return true;
	}
			    const struct kvm_userspace_memory_region2 *mem)
{
	struct kvm_memory_slot *old, *new;
2024 struct kvm_memslots *slots;
2025 enum kvm_mr_change change;
2026 unsigned long npages;
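	/* The slot argument packs the address-space ID into its upper 16 bits. */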
2035 as_id = mem->slot >> 16;
2036 id = (u16)mem->slot;
	/* General sanity checks */
	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
	    (mem->memory_size != (unsigned long)mem->memory_size))
		return -EINVAL;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size))
		return -EINVAL;
	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
		return -EINVAL;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;
	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
		return -EINVAL;
2061 slots = __kvm_memslots(kvm, as_id);
	old = id_to_memslot(slots, id);
2069 if (!mem->memory_size) {
2070 if (!old || !old->npages)
2073 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2079 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2080 npages = (mem->memory_size >> PAGE_SHIFT);
2082 if (!old || !old->npages) {
2083 change = KVM_MR_CREATE;
2089 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
		if (mem->flags & KVM_MEM_GUEST_MEMFD)
			return -EINVAL;
		if ((mem->userspace_addr != old->userspace_addr) ||
		    (npages != old->npages) ||
		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
			return -EINVAL;

		if (base_gfn != old->base_gfn)
			change = KVM_MR_MOVE;
		else if (mem->flags != old->flags)
			change = KVM_MR_FLAGS_ONLY;
	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
		return -EEXIST;

	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
	if (!new)
		return -ENOMEM;

	new->base_gfn = base_gfn;
	new->npages = npages;
	new->flags = mem->flags;
	new->userspace_addr = mem->userspace_addr;
	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2136 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2145 const struct kvm_userspace_memory_region2 *mem)
2149 mutex_lock(&kvm->slots_lock);
2151 mutex_unlock(&kvm->slots_lock);
2157 struct kvm_userspace_memory_region2 *mem)
2159 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2165 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
		      int *is_dirty, struct kvm_memory_slot **memslot)
2176 struct kvm_memslots *slots;
2179 unsigned long any = 0;
2188 as_id = log->slot >> 16;
2189 id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
2193 slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
2195 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2200 n = kvm_dirty_bitmap_bytes(*memslot);
	for (i = 0; !any && i < n / sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];
2205 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
2238 struct kvm_memslots *slots;
2239 struct kvm_memory_slot *memslot;
2242 unsigned long *dirty_bitmap;
2243 unsigned long *dirty_bitmap_buffer;
2250 as_id = log->slot >> 16;
2251 id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
2255 slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
2257 if (!memslot || !memslot->dirty_bitmap)
2260 dirty_bitmap = memslot->dirty_bitmap;
2264 n = kvm_dirty_bitmap_bytes(memslot);
2266 if (kvm->manual_dirty_log_protect) {
2275 dirty_bitmap_buffer = dirty_bitmap;
2277 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2278 memset(dirty_bitmap_buffer, 0, n);
		for (i = 0; i < n / sizeof(long); i++) {
2285 if (!dirty_bitmap[i])
2289 mask = xchg(&dirty_bitmap[i], 0);
2290 dirty_bitmap_buffer[i] = mask;
2292 offset = i * BITS_PER_LONG;
2302 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				       struct kvm_dirty_log *log)
{
2332 mutex_lock(&kvm->slots_lock);
2334 r = kvm_get_dirty_log_protect(kvm, log);
2336 mutex_unlock(&kvm->slots_lock);
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
					struct kvm_clear_dirty_log *log)
{
2349 struct kvm_memslots *slots;
2350 struct kvm_memory_slot *memslot;
2354 unsigned long *dirty_bitmap;
2355 unsigned long *dirty_bitmap_buffer;
2362 as_id = log->slot >> 16;
2363 id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
2367 if (log->first_page & 63)
2370 slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
2372 if (!memslot || !memslot->dirty_bitmap)
2375 dirty_bitmap = memslot->dirty_bitmap;
2377 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2379 if (log->first_page > memslot->npages ||
2380 log->num_pages > memslot->npages - log->first_page ||
2381 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2387 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2388 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
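	/*
	 * Only bits that userspace passed in the mask and that are still set
	 * in the memslot's dirty bitmap are cleared; the corresponding pages
	 * are then write-protected again so that future writes get re-logged.
	 */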
2392 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2393 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2394 i++, offset += BITS_PER_LONG) {
2395 unsigned long mask = *dirty_bitmap_buffer++;
2396 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2400 mask &= atomic_long_fetch_andnot(mask, p);
static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
					 struct kvm_clear_dirty_log *log)
{
2427 mutex_lock(&kvm->slots_lock);
2429 r = kvm_clear_dirty_log_protect(kvm, log);
2431 mutex_unlock(&kvm->slots_lock);
2436 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attrs)
{
	XA_STATE(xas, &kvm->mem_attr_array, start);
	unsigned long index;
	void *entry;
	bool has_attrs;

	has_attrs = !xas_find(&xas, end - 1);

	for (index = start; index < end; index++) {
		do {
			entry = xas_next(&xas);
		} while (xas_retry(&xas, entry));
2462 if (xas.xa_index != index || xa_to_value(entry) != attrs) {
static u64 kvm_supported_mem_attributes(struct kvm *kvm)
{
	if (!kvm || kvm_arch_has_private_mem(kvm))
		return KVM_MEMORY_ATTRIBUTE_PRIVATE;

	return 0;
}
static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_mmu_notifier_range *range)
{
2484 struct kvm_gfn_range gfn_range;
2485 struct kvm_memory_slot *slot;
2486 struct kvm_memslots *slots;
	struct kvm_memslot_iter iter;
	bool found_memslot = false;
2492 gfn_range.arg = range->arg;
2493 gfn_range.may_block = range->may_block;
2495 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2496 slots = __kvm_memslots(kvm, i);
2498 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2500 gfn_range.slot = slot;
2502 gfn_range.start = max(range->start, slot->base_gfn);
2503 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2504 if (gfn_range.start >= gfn_range.end)
			if (!found_memslot) {
				found_memslot = true;

				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm);
			}
2514 ret |= range->handler(kvm, &gfn_range);
2518 if (range->flush_on_ret && ret)
static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
					   struct kvm_gfn_range *range)
{
2539 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2541 return kvm_arch_pre_set_memory_attributes(kvm, range);
static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attributes)
{
	struct kvm_mmu_notifier_range pre_set_range = {
		.start		= start,
		.end		= end,
		.handler	= kvm_pre_set_memory_attributes,
		.on_lock	= kvm_mmu_invalidate_begin,
		.flush_on_ret	= true,
	};
	struct kvm_mmu_notifier_range post_set_range = {
		.start		= start,
		.end		= end,
		.arg.attributes	= attributes,
		.handler	= kvm_arch_post_set_memory_attributes,
		.on_lock	= kvm_mmu_invalidate_end,
	};
2568 entry = attributes ? xa_mk_value(attributes) : NULL;
2570 mutex_lock(&kvm->slots_lock);
	if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
		goto out_unlock;

	for (i = start; i < end; i++) {
2581 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2586 kvm_handle_gfn_range(kvm, &pre_set_range);
	for (i = start; i < end; i++) {
		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
				    GFP_KERNEL_ACCOUNT));
2594 kvm_handle_gfn_range(kvm, &post_set_range);
2597 mutex_unlock(&kvm->slots_lock);
static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
					    struct kvm_memory_attributes *attrs)
{
2609 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2611 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2613 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2616 start = attrs->address >> PAGE_SHIFT;
2617 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));

	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2632 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2638 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2639 u64 gen = slots->generation;
2640 struct kvm_memory_slot *slot;
2646 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2647 vcpu->last_used_slot = NULL;
2648 vcpu->last_used_slot_gen = gen;
2651 slot = try_get_memslot(vcpu->last_used_slot, gfn);
	slot = search_memslots(slots, gfn, false);
2662 vcpu->last_used_slot = slot;
2673 return kvm_is_visible_memslot(memslot);
2681 return kvm_is_visible_memslot(memslot);
2687 struct vm_area_struct *vma;
	unsigned long addr, size;
2693 if (kvm_is_error_hva(addr))
2696 mmap_read_lock(current->mm);
2697 vma = find_vma(current->mm, addr);
2701 size = vma_kernel_pagesize(vma);
2704 mmap_read_unlock(current->mm);
2711 return slot->flags & KVM_MEM_READONLY;
				       gfn_t *nr_pages, bool write)
2717 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2718 return KVM_HVA_ERR_BAD;
2721 return KVM_HVA_ERR_RO_BAD;
2724 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2726 return __gfn_to_hva_memslot(slot, gfn);
			    gfn_t gfn, bool *writable)
2767 if (!kvm_is_error_hva(hva) && writable)
2789 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2791 rc = get_user_pages(addr, 1, flags, NULL);
2792 return rc == -EHWPOISON;
2801 bool *writable, kvm_pfn_t *pfn)
2803 struct page *page[1];
2810 if (!(write_fault || writable))
2813 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2814 *pfn = page_to_pfn(page[0]);
			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
2842 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2849 *writable = write_fault;
2852 flags |= FOLL_WRITE;
2854 flags |= FOLL_NOWAIT;
2856 flags |= FOLL_INTERRUPTIBLE;
2858 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2863 if (unlikely(!write_fault) && writable) {
2866 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2872 *pfn = page_to_pfn(page);
2878 if (unlikely(!(vma->vm_flags & VM_READ)))
2881 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2894 return get_page_unless_zero(page);
			       unsigned long addr, bool write_fault,
			       bool *writable, kvm_pfn_t *p_pfn)
2907 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
		bool unlocked = false;
2914 r = fixup_user_fault(current->mm, addr,
2915 (write_fault ? FAULT_FLAG_WRITE : 0),
2922 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2927 pte = ptep_get(ptep);
2929 if (write_fault && !pte_write(pte)) {
2930 pfn = KVM_PFN_ERR_RO_FAULT;
2935 *writable = pte_write(pte);
2959 pte_unmap_unlock(ptep, ptl);
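/*
 * hva_to_pfn() translates a host virtual address into a host pfn.  It first
 * tries a lockless get_user_page_fast_only() fast path, then falls back to
 * get_user_pages_unlocked(), and finally handles VM_IO/VM_PFNMAP mappings
 * by walking the page tables under the mmap read lock.
 */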
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
		     bool *async, bool write_fault, bool *writable)
{
2983 struct vm_area_struct *vma;
2988 BUG_ON(atomic && async);
2994 return KVM_PFN_ERR_FAULT;
3000 if (npages == -EINTR)
3001 return KVM_PFN_ERR_SIGPENDING;
3003 mmap_read_lock(current->mm);
3004 if (npages == -EHWPOISON ||
3006 pfn = KVM_PFN_ERR_HWPOISON;
3011 vma = vma_lookup(current->mm, addr);
3014 pfn = KVM_PFN_ERR_FAULT;
3015 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3020 pfn = KVM_PFN_ERR_FAULT;
3024 pfn = KVM_PFN_ERR_FAULT;
3027 mmap_read_unlock(current->mm);
			       bool atomic, bool interruptible, bool *async,
			       bool write_fault, bool *writable, hva_t *hva)
{
3040 if (addr == KVM_HVA_ERR_RO_BAD) {
3043 return KVM_PFN_ERR_RO_FAULT;
3046 if (kvm_is_error_hva(addr)) {
3049 return KVM_PFN_NOSLOT;
3058 return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3067 NULL, write_fault, writable, NULL);
			    struct page **pages, int nr_pages)
3110 if (kvm_is_error_hva(addr))
3113 if (entry < nr_pages)
3116 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3133 if (is_error_noslot_pfn(pfn))
3134 return KVM_ERR_PTR_BAD_PAGE;
3138 return KVM_ERR_PTR_BAD_PAGE;
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
3156 struct page *page = KVM_UNMAPPED_PAGE;
3162 if (is_error_noslot_pfn(pfn))
3165 if (pfn_valid(pfn)) {
3166 page = pfn_to_page(pfn);
3168 #ifdef CONFIG_HAS_IOMEM
3170 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3194 if (map->page != KVM_UNMAPPED_PAGE)
3196 #ifdef CONFIG_HAS_IOMEM
3217 return !PageReserved(page);
3229 mark_page_accessed(page);
3234 WARN_ON(is_error_page(page));
3245 if (is_error_noslot_pfn(pfn))
3258 WARN_ON(is_error_page(page));
3269 if (is_error_noslot_pfn(pfn))
3287 if (WARN_ON(is_error_noslot_pfn(pfn)))
3297 if (WARN_ON(is_error_noslot_pfn(pfn)))
3307 if (len > PAGE_SIZE - offset)
3308 return PAGE_SIZE - offset;
				 void *data, int offset, int len)
3320 if (kvm_is_error_hva(addr))
	r = __copy_from_user(data, (void __user *)addr + offset, len);
			int offset, int len)
3348 gfn_t gfn = gpa >> PAGE_SHIFT;
3350 int offset = offset_in_page(gpa);
3368 gfn_t gfn = gpa >> PAGE_SHIFT;
3370 int offset = offset_in_page(gpa);
				       void *data, int offset, unsigned long len)
3393 if (kvm_is_error_hva(addr))
3395 pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
			       void *data, unsigned long len)
3406 gfn_t gfn = gpa >> PAGE_SHIFT;
3408 int offset = offset_in_page(gpa);
				  struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
3422 if (kvm_is_error_hva(addr))
	r = __copy_to_user((void __user *)addr + offset, data, len);
			 const void *data, int offset, int len)
			      const void *data, int offset, int len)
3452 gfn_t gfn = gpa >> PAGE_SHIFT;
3454 int offset = offset_in_page(gpa);
3473 gfn_t gfn = gpa >> PAGE_SHIFT;
3475 int offset = offset_in_page(gpa);
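/*
 * Pre-compute the hva and memslot for a gpa range so that later cached
 * reads/writes can skip the memslot lookup as long as the memslot
 * generation has not changed.  Ranges spanning multiple memslots fall back
 * to the uncached path (ghc->memslot is left NULL).
 */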
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
{
3495 int offset = offset_in_page(gpa);
3496 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3497 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3498 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3499 gfn_t nr_pages_avail;
3502 ghc->generation = slots->generation;
3504 if (start_gfn > end_gfn) {
3505 ghc->hva = KVM_HVA_ERR_BAD;
3513 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3514 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3517 if (kvm_is_error_hva(ghc->hva))
3522 if (nr_pages_needed == 1)
3525 ghc->memslot = NULL;
			      gpa_t gpa, unsigned long len)
3535 struct kvm_memslots *slots = kvm_memslots(kvm);
				  void *data, unsigned int offset,
3544 struct kvm_memslots *slots = kvm_memslots(kvm);
3546 gpa_t gpa = ghc->gpa + offset;
3548 if (WARN_ON_ONCE(len + offset > ghc->len))
3551 if (slots->generation != ghc->generation) {
3556 if (kvm_is_error_hva(ghc->hva))
3559 if (unlikely(!ghc->memslot))
	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
			   void *data, unsigned long len)
				 void *data, unsigned int offset,
3582 struct kvm_memslots *slots = kvm_memslots(kvm);
3584 gpa_t gpa = ghc->gpa + offset;
3586 if (WARN_ON_ONCE(len + offset > ghc->len))
3589 if (slots->generation != ghc->generation) {
3594 if (kvm_is_error_hva(ghc->hva))
3597 if (unlikely(!ghc->memslot))
	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
			  void *data, unsigned long len)

	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3618 gfn_t gfn = gpa >> PAGE_SHIFT;
3620 int offset = offset_in_page(gpa);
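/*
 * mark_page_dirty_in_slot() records a dirtied gfn either in the per-vCPU
 * dirty ring (when a dirty ring is enabled and a vCPU context is available)
 * or by setting the corresponding bit in the memslot's dirty bitmap.
 */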
3636 const struct kvm_memory_slot *memslot,
3641 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3642 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3648 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3649 unsigned long rel_gfn = gfn - memslot->base_gfn;
3650 u32 slot = (memslot->as_id << 16) | memslot->id;
3652 if (kvm->dirty_ring_size && vcpu)
3654 else if (memslot->dirty_bitmap)
3655 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3662 struct kvm_memory_slot *memslot;
3671 struct kvm_memory_slot *memslot;
	if (!vcpu->sigset_active)
		return;

	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);

	if (!vcpu->sigset_active)
		return;

	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
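/*
 * grow_halt_poll_ns()/shrink_halt_poll_ns() adapt the per-vCPU busy-poll
 * window: short halts grow it (bounded by the VM's halt-poll limit) while
 * long blocks or invalid wakeups shrink it, trading CPU time for wakeup
 * latency.
 */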
3703 unsigned int old, val, grow, grow_start;
3705 old = val = vcpu->halt_poll_ns;
3712 if (val < grow_start)
3715 vcpu->halt_poll_ns = val;
3717 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3722 unsigned int old, val, shrink, grow_start;
3724 old = val = vcpu->halt_poll_ns;
3732 if (val < grow_start)
3735 vcpu->halt_poll_ns = val;
3736 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3742 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3748 if (signal_pending(current))
3750 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3755 srcu_read_unlock(&vcpu->kvm->srcu, idx);
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
	bool waited = false;
3769 vcpu->stat.generic.blocking = 1;
3773 prepare_to_rcuwait(wait);
3777 set_current_state(TASK_INTERRUPTIBLE);
3787 finish_rcuwait(wait);
3791 vcpu->stat.generic.blocking = 0;
					  ktime_t end, bool success)
{
	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3802 ++vcpu->stat.generic.halt_attempted_poll;
3805 ++vcpu->stat.generic.halt_successful_poll;
3807 if (!vcpu_valid_wakeup(vcpu))
3808 ++vcpu->stat.generic.halt_poll_invalid;
3810 stats->halt_poll_success_ns += poll_ns;
3811 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3813 stats->halt_poll_fail_ns += poll_ns;
3814 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3820 struct kvm *kvm = vcpu->kvm;
3822 if (kvm->override_halt_poll_ns) {
3830 return READ_ONCE(kvm->max_halt_poll_ns);
3845 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
	ktime_t start, cur, poll_end;
	bool waited = false;
3851 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3852 vcpu->halt_poll_ns = max_halt_poll_ns;
3854 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
	start = cur = poll_end = ktime_get();
		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
			poll_end = cur = ktime_get();
		} while (kvm_vcpu_can_poll(cur, stop));
		vcpu->stat.generic.halt_wait_ns +=
			ktime_to_ns(cur) - ktime_to_ns(poll_end);
		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
				ktime_to_ns(cur) - ktime_to_ns(poll_end));

	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
	if (halt_poll_allowed) {
		if (!vcpu_valid_wakeup(vcpu)) {
			shrink_halt_poll_ns(vcpu);
		} else if (max_halt_poll_ns) {
			if (halt_ns <= vcpu->halt_poll_ns)
				;
			/* We had a long block, shrink polling. */
			else if (vcpu->halt_poll_ns &&
				 halt_ns > max_halt_poll_ns)
				shrink_halt_poll_ns(vcpu);
			/* We had a short halt and the poll window is too small. */
			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
				 halt_ns < max_halt_poll_ns)
				grow_halt_poll_ns(vcpu);
		} else {
			vcpu->halt_poll_ns = 0;
		}
	}
3911 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
	if (__kvm_vcpu_wake_up(vcpu)) {
		WRITE_ONCE(vcpu->ready, true);
3919 ++vcpu->stat.generic.halt_wakeup;
3945 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3946 if (vcpu->mode == IN_GUEST_MODE)
3947 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
	cpu = READ_ONCE(vcpu->cpu);
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
3972 struct task_struct *task = NULL;
3976 pid = rcu_dereference(target->pid);
3978 task = get_pid_task(pid, PIDTYPE_PID);
3982 ret = yield_to(task, 1);
3983 put_task_struct(task);
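/*
 * Directed-yield eligibility: a vCPU that is itself in a spin loop is only
 * treated as a yield candidate on alternating attempts (its dy_eligible
 * flag toggles), so a busy-spinning vCPU is not boosted repeatedly.
 */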
4013 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
4016 eligible = !vcpu->spin_loop.in_spin_loop ||
4017 vcpu->spin_loop.dy_eligible;
4019 if (vcpu->spin_loop.in_spin_loop)
4020 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
4043 #ifdef CONFIG_KVM_ASYNC_PF
4044 if (!list_empty_careful(&vcpu->async_pf.done))
4058 struct kvm *kvm = me->kvm;
4059 struct kvm_vcpu *vcpu;
4060 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	kvm_vcpu_set_in_spin_loop(me, true);

	for (pass = 0; pass < 2 && !yielded && try; pass++) {
4075 kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
4081 if (!READ_ONCE(vcpu->ready))
4087 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4096 kvm->last_boosted_vcpu = i;
			} else if (yielded < 0) {
	kvm_vcpu_set_in_spin_loop(me, false);
	kvm_vcpu_set_dy_eligible(me, false);
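/*
 * kvm_page_in_dirty_ring() checks whether a vCPU mmap page offset falls
 * inside the dirty-ring region that starts at KVM_DIRTY_LOG_PAGE_OFFSET.
 */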
4114 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4115 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4116 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4117 kvm->dirty_ring_size / PAGE_SIZE);
4125 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4128 if (vmf->pgoff == 0)
4129 page = virt_to_page(vcpu->run);
4131 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4132 page = virt_to_page(vcpu->arch.pio_data);
4134 #ifdef CONFIG_KVM_MMIO
4135 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4136 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4141 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4155 struct kvm_vcpu *vcpu = file->private_data;
4156 unsigned long pages = vma_pages(vma);
4160 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4169 struct kvm_vcpu *vcpu = filp->private_data;
4179 .llseek = noop_llseek,
	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{
	struct kvm_vcpu *vcpu = data;

	rcu_read_lock();
	*val = pid_nr(rcu_dereference(vcpu->pid));
	rcu_read_unlock();
	return 0;
}
static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	struct dentry *debugfs_dentry;
	char dir_name[ITOA_MAX_LEN * 2];

	if (!debugfs_initialized())
		return;

	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
	debugfs_dentry = debugfs_create_dir(dir_name,
					    vcpu->kvm->debugfs_dentry);
	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
			    &vcpu_get_pid_fops);

	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4231 struct kvm_vcpu *vcpu;
	if (id >= KVM_MAX_VCPU_IDS)
		return -EINVAL;
4237 mutex_lock(&kvm->lock);
4238 if (kvm->created_vcpus >= kvm->max_vcpus) {
4239 mutex_unlock(&kvm->lock);
4245 mutex_unlock(&kvm->lock);
4249 kvm->created_vcpus++;
4250 mutex_unlock(&kvm->lock);
4255 goto vcpu_decrement;
	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4259 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4264 vcpu->run = page_address(page);
4270 goto vcpu_free_run_page;
4272 if (kvm->dirty_ring_size) {
4274 id, kvm->dirty_ring_size);
4276 goto arch_vcpu_destroy;
4279 mutex_lock(&kvm->lock);
4281 #ifdef CONFIG_LOCKDEP
4283 mutex_lock(&vcpu->mutex);
4284 mutex_unlock(&vcpu->mutex);
	if (kvm_get_vcpu_by_id(kvm, id)) {
		r = -EEXIST;
		goto unlock_vcpu_destroy;
	}
4292 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4293 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4295 goto unlock_vcpu_destroy;
4301 goto kvm_put_xa_release;
4303 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4305 goto kvm_put_xa_release;
4313 atomic_inc(&kvm->online_vcpus);
4315 mutex_unlock(&kvm->lock);
4317 kvm_create_vcpu_debugfs(vcpu);
4322 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4323 unlock_vcpu_destroy:
4324 mutex_unlock(&kvm->lock);
	free_page((unsigned long)vcpu->run);
4333 mutex_lock(&kvm->lock);
4334 kvm->created_vcpus--;
4335 mutex_unlock(&kvm->lock);
4342 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4343 vcpu->sigset_active = 1;
4344 vcpu->sigset = *sigset;
4346 vcpu->sigset_active = 0;
			      size_t size, loff_t *offset)
{
	struct kvm_vcpu *vcpu = file->private_data;

	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
			&kvm_vcpu_stats_desc[0], &vcpu->stat,
			sizeof(vcpu->stat), user_buffer, size, offset);
4362 struct kvm_vcpu *vcpu = file->private_data;
4369 .owner = THIS_MODULE,
4372 .llseek = noop_llseek,
	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4383 fd = get_unused_fd_flags(O_CLOEXEC);
4390 return PTR_ERR(file);
4395 file->f_mode |= FMODE_PREAD;
4396 fd_install(fd, file);
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_fpu *fpu = NULL;
4408 struct kvm_sregs *kvm_sregs = NULL;
4410 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4413 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4420 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4421 if (r != -ENOIOCTLCMD)
4424 if (mutex_lock_killable(&vcpu->mutex))
4432 oldpid = rcu_access_pointer(vcpu->pid);
4433 if (unlikely(oldpid != task_pid(current))) {
4441 newpid = get_task_pid(current, PIDTYPE_PID);
4442 rcu_assign_pointer(vcpu->pid, newpid);
4448 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4451 case KVM_GET_REGS: {
4452 struct kvm_regs *kvm_regs;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4469 case KVM_SET_REGS: {
4470 struct kvm_regs *kvm_regs;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4473 if (IS_ERR(kvm_regs)) {
4474 r = PTR_ERR(kvm_regs);
4481 case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL_ACCOUNT);
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4496 case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4498 if (IS_ERR(kvm_sregs)) {
4499 r = PTR_ERR(kvm_sregs);
4506 case KVM_GET_MP_STATE: {
4507 struct kvm_mp_state mp_state;
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4518 case KVM_SET_MP_STATE: {
4519 struct kvm_mp_state mp_state;
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4527 case KVM_TRANSLATE: {
4528 struct kvm_translation tr;
		if (copy_from_user(&tr, argp, sizeof(tr)))
		if (copy_to_user(argp, &tr, sizeof(tr)))
4542 case KVM_SET_GUEST_DEBUG: {
4543 struct kvm_guest_debug dbg;
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4551 case KVM_SET_SIGNAL_MASK: {
4552 struct kvm_signal_mask __user *sigmask_arg = argp;
4553 struct kvm_signal_mask kvm_sigmask;
4554 sigset_t sigset, *p;
4559 if (copy_from_user(&kvm_sigmask, argp,
4560 sizeof(kvm_sigmask)))
		if (kvm_sigmask.len != sizeof(sigset))
4566 if (copy_from_user(&sigset, sigmask_arg->sigset,
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
		fpu = memdup_user(argp, sizeof(*fpu));
4598 case KVM_GET_STATS_FD: {
4606 mutex_unlock(&vcpu->mutex);
4612 #ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
4616 struct kvm_vcpu *vcpu = filp->private_data;
4617 void __user *argp = compat_ptr(arg);
4620 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4624 case KVM_SET_SIGNAL_MASK: {
4625 struct kvm_signal_mask __user *sigmask_arg = argp;
4626 struct kvm_signal_mask kvm_sigmask;
4631 if (copy_from_user(&kvm_sigmask, argp,
4632 sizeof(kvm_sigmask)))
		if (kvm_sigmask.len != sizeof(compat_sigset_t))
4638 if (get_compat_sigset(&sigset,
4639 (compat_sigset_t __user *)sigmask_arg->sigset))
4657 struct kvm_device *dev = filp->private_data;
4660 return dev->ops->mmap(dev, vma);
				int (*accessor)(struct kvm_device *dev,
						struct kvm_device_attr *attr),
				unsigned long arg)
{
	struct kvm_device_attr attr;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}
4684 struct kvm_device *dev = filp->private_data;
4686 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4690 case KVM_SET_DEVICE_ATTR:
4692 case KVM_GET_DEVICE_ATTR:
4694 case KVM_HAS_DEVICE_ATTR:
4697 if (dev->ops->ioctl)
4698 return dev->ops->ioctl(dev, ioctl, arg);
4706 struct kvm_device *dev = filp->private_data;
4707 struct kvm *kvm = dev->kvm;
4709 if (dev->ops->release) {
4710 mutex_lock(&kvm->lock);
4711 list_del(&dev->vm_node);
4712 dev->ops->release(dev);
4713 mutex_unlock(&kvm->lock);
4732 return filp->private_data;
4736 #ifdef CONFIG_KVM_MPIC
4737 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4738 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4761 struct kvm_create_device *cd)
4763 const struct kvm_device_ops *ops;
4764 struct kvm_device *dev;
4765 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4787 mutex_lock(&kvm->lock);
4788 ret = ops->create(dev, type);
4790 mutex_unlock(&kvm->lock);
4794 list_add(&dev->vm_node, &kvm->devices);
4795 mutex_unlock(&kvm->lock);
	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4804 mutex_lock(&kvm->lock);
4805 list_del(&dev->vm_node);
4808 mutex_unlock(&kvm->lock);
4821 case KVM_CAP_USER_MEMORY:
4822 case KVM_CAP_USER_MEMORY2:
4823 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4824 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4825 case KVM_CAP_INTERNAL_ERROR_DATA:
4826 #ifdef CONFIG_HAVE_KVM_MSI
4827 case KVM_CAP_SIGNAL_MSI:
4829 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4832 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4833 case KVM_CAP_CHECK_EXTENSION_VM:
4834 case KVM_CAP_ENABLE_CAP_VM:
4835 case KVM_CAP_HALT_POLL:
4837 #ifdef CONFIG_KVM_MMIO
4838 case KVM_CAP_COALESCED_MMIO:
4839 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4840 case KVM_CAP_COALESCED_PIO:
4843 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4844 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4845 return KVM_DIRTY_LOG_MANUAL_CAPS;
4847 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4848 case KVM_CAP_IRQ_ROUTING:
4849 return KVM_MAX_IRQ_ROUTES;
4851 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4852 case KVM_CAP_MULTI_ADDRESS_SPACE:
4854 return kvm_arch_nr_memslot_as_ids(kvm);
4855 return KVM_MAX_NR_ADDRESS_SPACES;
4857 case KVM_CAP_NR_MEMSLOTS:
4858 return KVM_USER_MEM_SLOTS;
4859 case KVM_CAP_DIRTY_LOG_RING:
4860 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4871 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4872 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4874 case KVM_CAP_BINARY_STATS_FD:
4875 case KVM_CAP_SYSTEM_EVENT_DATA:
4876 case KVM_CAP_DEVICE_CTRL:
4878 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4879 case KVM_CAP_MEMORY_ATTRIBUTES:
4880 return kvm_supported_mem_attributes(kvm);
4882 #ifdef CONFIG_KVM_PRIVATE_MEM
4883 case KVM_CAP_GUEST_MEMFD:
4884 return !kvm || kvm_arch_has_private_mem(kvm);
	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
		return -EINVAL;

	/* The ring must hold the reserved entries and be at least a page. */
	if (size < kvm_dirty_ring_get_rsvd_entries() *
	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
		return -EINVAL;

	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
	    sizeof(struct kvm_dirty_gfn))
		return -E2BIG;

	/* Only allow the size to be set once. */
	if (kvm->dirty_ring_size)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (kvm->created_vcpus) {
		/* The size cannot change once vCPUs have been created. */
		r = -EINVAL;
	} else {
		kvm->dirty_ring_size = size;
		r = 0;
	}

	mutex_unlock(&kvm->lock);
4933 struct kvm_vcpu *vcpu;
4936 if (!kvm->dirty_ring_size)
4939 mutex_lock(&kvm->slots_lock);
4941 kvm_for_each_vcpu(i, vcpu, kvm)
4944 mutex_unlock(&kvm->slots_lock);
4953 struct kvm_enable_cap *cap)
4962 lockdep_assert_held(&kvm->slots_lock);
4964 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
4965 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4974 struct kvm_enable_cap *cap)
4977 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4978 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4979 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4981 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4982 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4984 if (cap->flags || (cap->args[0] & ~allowed_options))
4986 kvm->manual_dirty_log_protect = cap->args[0];
4990 case KVM_CAP_HALT_POLL: {
4991 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4994 kvm->max_halt_poll_ns = cap->args[0];
5003 kvm->override_halt_poll_ns = true;
5007 case KVM_CAP_DIRTY_LOG_RING:
5008 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5013 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5016 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5017 !kvm->dirty_ring_size || cap->flags)
5020 mutex_lock(&kvm->slots_lock);
5028 kvm->dirty_ring_with_bitmap = true;
5032 mutex_unlock(&kvm->slots_lock);
5042 size_t size, loff_t *offset)
5044 struct kvm *kvm = file->private_data;
5048 sizeof(kvm->stat), user_buffer, size, offset);
5053 struct kvm *kvm = file->private_data;
5060 .owner = THIS_MODULE,
5063 .llseek = noop_llseek,
5071 fd = get_unused_fd_flags(O_CLOEXEC);
5075 file = anon_inode_getfile("kvm-vm-stats",
5079 return PTR_ERR(file);
5084 file->f_mode |= FMODE_PREAD;
5085 fd_install(fd, file);
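/*
 * KVM_SET_USER_MEMORY_REGION and KVM_SET_USER_MEMORY_REGION2 share one
 * handler, so the legacy struct must be a prefix of struct
 * kvm_userspace_memory_region2; these build-time checks enforce that layout.
 */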
5090 #define SANITY_CHECK_MEM_REGION_FIELD(field) \
5092 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
5093 offsetof(struct kvm_userspace_memory_region2, field)); \
5094 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
5095 sizeof_field(struct kvm_userspace_memory_region2, field)); \
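/*
 * Main ioctl dispatcher for VM file descriptors.  Calls are rejected when
 * they come from a different mm than the one that created the VM or when
 * the VM has been marked dead; otherwise each command is copied in from
 * userspace and routed to its handler.
 */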
5099 unsigned int ioctl, unsigned long arg)
5101 struct kvm *kvm = filp->private_data;
5102 void __user *argp = (void __user *)arg;
5105 if (kvm->mm != current->mm || kvm->vm_dead)
5108 case KVM_CREATE_VCPU:
5111 case KVM_ENABLE_CAP: {
5112 struct kvm_enable_cap cap;
5115 if (copy_from_user(&cap, argp, sizeof(cap)))
5120 case KVM_SET_USER_MEMORY_REGION2:
5121 case KVM_SET_USER_MEMORY_REGION: {
5122 struct kvm_userspace_memory_region2 mem;
5125 if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5130 memset(&mem, 0, sizeof(mem));
5131 size = sizeof(struct kvm_userspace_memory_region);
5133 size = sizeof(struct kvm_userspace_memory_region2);
5144 if (copy_from_user(&mem, argp, size))
5148 if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5155 case KVM_GET_DIRTY_LOG: {
5156 struct kvm_dirty_log log;
5159 if (copy_from_user(&log, argp, sizeof(log)))
5161 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5164 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5165 case KVM_CLEAR_DIRTY_LOG: {
5166 struct kvm_clear_dirty_log log;
5169 if (copy_from_user(&log, argp, sizeof(log)))
5171 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5175 #ifdef CONFIG_KVM_MMIO
5176 case KVM_REGISTER_COALESCED_MMIO: {
5177 struct kvm_coalesced_mmio_zone zone;
5180 if (copy_from_user(&zone, argp, sizeof(zone)))
5185 case KVM_UNREGISTER_COALESCED_MMIO: {
5186 struct kvm_coalesced_mmio_zone zone;
5189 if (copy_from_user(&zone, argp, sizeof(zone)))
5196 struct kvm_irqfd data;
5199 if (copy_from_user(&data, argp, sizeof(data)))
5201 r = kvm_irqfd(kvm, &data);
5204 case KVM_IOEVENTFD: {
5208 if (copy_from_user(&data, argp, sizeof(data)))
5213 #ifdef CONFIG_HAVE_KVM_MSI
5214 case KVM_SIGNAL_MSI: {
5218 if (copy_from_user(&msi, argp, sizeof(msi)))
5224 #ifdef __KVM_HAVE_IRQ_LINE
5225 case KVM_IRQ_LINE_STATUS:
5226 case KVM_IRQ_LINE: {
5227 struct kvm_irq_level irq_event;
5230 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5234 ioctl == KVM_IRQ_LINE_STATUS);
5239 if (ioctl == KVM_IRQ_LINE_STATUS) {
5240 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5248 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5249 case KVM_SET_GSI_ROUTING: {
5250 struct kvm_irq_routing routing;
5251 struct kvm_irq_routing __user *urouting;
5252 struct kvm_irq_routing_entry *entries = NULL;
5255 if (copy_from_user(&routing, argp, sizeof(routing)))
5260 if (routing.nr > KVM_MAX_IRQ_ROUTES)
5266 entries = vmemdup_array_user(urouting->entries,
5267 routing.nr, sizeof(*entries));
5268 if (IS_ERR(entries)) {
5269 r = PTR_ERR(entries);
5279 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5280 case KVM_SET_MEMORY_ATTRIBUTES: {
5281 struct kvm_memory_attributes attrs;
5284 if (copy_from_user(&attrs, argp, sizeof(attrs)))
5287 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5291 case KVM_CREATE_DEVICE: {
5292 struct kvm_create_device cd;
5295 if (copy_from_user(&cd, argp, sizeof(cd)))
5303 if (copy_to_user(argp, &cd, sizeof(cd)))
5309 case KVM_CHECK_EXTENSION:
5312 case KVM_RESET_DIRTY_RINGS:
5315 case KVM_GET_STATS_FD:
5318 #ifdef CONFIG_KVM_PRIVATE_MEM
5319 case KVM_CREATE_GUEST_MEMFD: {
5320 struct kvm_create_guest_memfd guest_memfd;
5323 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5337 #ifdef CONFIG_KVM_COMPAT
5338 struct compat_kvm_dirty_log {
5342 compat_uptr_t dirty_bitmap;
5347 struct compat_kvm_clear_dirty_log {
5352 compat_uptr_t dirty_bitmap;
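/*
 * 32-bit compat path: the dirty-log ioctls carry user pointers, so their
 * compat layouts are converted to the native structures before reusing the
 * regular handlers.  Anything not handled here is first offered to the arch
 * via kvm_arch_vm_compat_ioctl() and then falls back to kvm_vm_ioctl().
 */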
5357 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5363 static long kvm_vm_compat_ioctl(struct file *filp,
5364 unsigned int ioctl, unsigned long arg)
5366 struct kvm *kvm = filp->private_data;
5369 if (kvm->mm != current->mm || kvm->vm_dead)
5372 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5377 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5378 case KVM_CLEAR_DIRTY_LOG: {
5379 struct compat_kvm_clear_dirty_log compat_log;
5380 struct kvm_clear_dirty_log log;
5382 if (copy_from_user(&compat_log, (void __user *)arg,
5383 sizeof(compat_log)))
5385 log.slot = compat_log.slot;
5386 log.num_pages = compat_log.num_pages;
5387 log.first_page = compat_log.first_page;
5388 log.padding2 = compat_log.padding2;
5389 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5391 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5395 case KVM_GET_DIRTY_LOG: {
5396 struct compat_kvm_dirty_log compat_log;
5397 struct kvm_dirty_log log;
5399 if (copy_from_user(&compat_log, (void __user *)arg,
5400 sizeof(compat_log)))
5402 log.slot = compat_log.slot;
5403 log.padding1 = compat_log.padding1;
5404 log.padding2 = compat_log.padding2;
5405 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5407 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5420 .llseek = noop_llseek,
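/*
 * KVM_CREATE_VM: reserve a file descriptor, create and initialise the VM,
 * wrap it in an anonymous inode file and only then install the fd, so any
 * failure in between can still unwind cleanly.
 */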
5437 fd = get_unused_fd_flags(O_CLOEXEC);
5441 snprintf(fdname, sizeof(fdname), "%d", fd);
5449 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5463 fd_install(fd, file);
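/*
 * ioctls on /dev/kvm itself: KVM_GET_API_VERSION, KVM_CREATE_VM,
 * KVM_CHECK_EXTENSION and KVM_GET_VCPU_MMAP_SIZE, with anything
 * unrecognised handed to kvm_arch_dev_ioctl().
 *
 * Typical userspace bring-up (illustrative sketch, not part of this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "unsupported KVM API version");
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */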
5474 unsigned int ioctl, unsigned long arg)
5479 case KVM_GET_API_VERSION:
5482 r = KVM_API_VERSION;
5487 case KVM_CHECK_EXTENSION:
5490 case KVM_GET_VCPU_MMAP_SIZE:
5497 #ifdef CONFIG_KVM_MMIO
5510 .llseek = noop_llseek,
5514 static struct miscdevice kvm_dev = {
5520 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5521 __visible bool kvm_rebooting;
5525 static int kvm_usage_count;
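/*
 * Host virtualization is enabled lazily: kvm_usage_count tracks live VMs
 * under kvm_lock, hardware_enable_all() turns virtualization on across all
 * online CPUs when the first VM appears, and hardware_disable_all() turns
 * it off again when the last VM goes away.  The CPU hotplug and syscore
 * (suspend/resume/shutdown) callbacks keep the per-CPU hardware_enabled
 * state consistent with that count.
 */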
5527 static int __hardware_enable_nolock(void)
5529 if (__this_cpu_read(hardware_enabled))
5533 pr_info("kvm: enabling virtualization on CPU%d failed\n",
5534 raw_smp_processor_id());
5538 __this_cpu_write(hardware_enabled, true);
5542 static void hardware_enable_nolock(void *failed)
5544 if (__hardware_enable_nolock())
5548 static int kvm_online_cpu(unsigned int cpu)
5557 mutex_lock(&kvm_lock);
5558 if (kvm_usage_count)
5559 ret = __hardware_enable_nolock();
5560 mutex_unlock(&kvm_lock);
5564 static void hardware_disable_nolock(void *junk)
5570 if (!__this_cpu_read(hardware_enabled))
5575 __this_cpu_write(hardware_enabled, false);
5578 static int kvm_offline_cpu(unsigned int cpu)
5580 mutex_lock(&kvm_lock);
5581 if (kvm_usage_count)
5582 hardware_disable_nolock(NULL);
5583 mutex_unlock(&kvm_lock);
5587 static void hardware_disable_all_nolock(void)
5589 BUG_ON(!kvm_usage_count);
5592 if (!kvm_usage_count)
5593 on_each_cpu(hardware_disable_nolock, NULL, 1);
5599 mutex_lock(&kvm_lock);
5600 hardware_disable_all_nolock();
5601 mutex_unlock(&kvm_lock);
5607 atomic_t failed = ATOMIC_INIT(0);
5619 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5620 system_state == SYSTEM_RESTART)
5632 mutex_lock(&kvm_lock);
5637 if (kvm_usage_count == 1) {
5638 on_each_cpu(hardware_enable_nolock, &failed, 1);
5640 if (atomic_read(&failed)) {
5641 hardware_disable_all_nolock();
5646 mutex_unlock(&kvm_lock);
5652 static void kvm_shutdown(void)
5665 pr_info("kvm: exiting hardware virtualization\n");
5666 kvm_rebooting = true;
5667 on_each_cpu(hardware_disable_nolock, NULL, 1);
5670 static int kvm_suspend(void)
5680 lockdep_assert_not_held(&kvm_lock);
5681 lockdep_assert_irqs_disabled();
5683 if (kvm_usage_count)
5684 hardware_disable_nolock(NULL);
5688 static void kvm_resume(void)
5690 lockdep_assert_not_held(&kvm_lock);
5691 lockdep_assert_irqs_disabled();
5693 if (kvm_usage_count)
5694 WARN_ON_ONCE(__hardware_enable_nolock());
5697 static struct syscore_ops kvm_syscore_ops = {
5698 .suspend = kvm_suspend,
5699 .resume = kvm_resume,
5700 .shutdown = kvm_shutdown,
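/*
 * kvm_io_bus: per-VM, SRCU-protected tables of address ranges that route
 * guest MMIO/PIO accesses to in-kernel devices (ioeventfds, coalesced MMIO,
 * interrupt controllers).  The range array is kept sorted so lookups can
 * use bsearch(); writers replace the whole bus under slots_lock and publish
 * it with rcu_assign_pointer() followed by an SRCU grace period.
 */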
5724 for (i = 0; i < bus->dev_count; i++) {
5733 const struct kvm_io_range *r2)
5735 gpa_t addr1 = r1->addr;
5736 gpa_t addr2 = r2->addr;
5763 gpa_t addr, int len)
5765 struct kvm_io_range *range, key;
5768 key = (struct kvm_io_range) {
5773 range = bsearch(&key, bus->range, bus->dev_count,
5778 off = range - bus->range;
5787 struct kvm_io_range *range, const void *val)
5795 while (idx < bus->dev_count &&
5808 int len, const void *val)
5810 struct kvm_io_bus *bus;
5811 struct kvm_io_range range;
5814 range = (struct kvm_io_range) {
5819 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5823 return r < 0 ? r : 0;
5829 gpa_t addr, int len, const void *val, long cookie)
5831 struct kvm_io_bus *bus;
5832 struct kvm_io_range range;
5834 range = (struct kvm_io_range) {
5839 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5844 if ((cookie >= 0) && (cookie < bus->dev_count) &&
5858 struct kvm_io_range *range, void *val)
5866 while (idx < bus->dev_count &&
5881 struct kvm_io_bus *bus;
5882 struct kvm_io_range range;
5885 range = (struct kvm_io_range) {
5890 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5894 return r < 0 ? r : 0;
5901 struct kvm_io_bus *new_bus, *bus;
5902 struct kvm_io_range range;
5904 lockdep_assert_held(&kvm->slots_lock);
5906 bus = kvm_get_bus(kvm, bus_idx);
5911 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5914 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5915 GFP_KERNEL_ACCOUNT);
5919 range = (struct kvm_io_range) {
5925 for (i = 0; i < bus->dev_count; i++)
5929 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5930 new_bus->dev_count++;
5931 new_bus->range[i] = range;
5932 memcpy(new_bus->range + i + 1, bus->range + i,
5933 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5934 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5935 synchronize_srcu_expedited(&kvm->srcu);
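/*
 * Removing a device copies the bus into a smaller allocation.  If that
 * allocation fails, the old bus is torn down entirely rather than left in
 * an inconsistent state, which is what the pr_err() below warns about.
 */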
5945 struct kvm_io_bus *new_bus, *bus;
5947 lockdep_assert_held(&kvm->slots_lock);
5949 bus = kvm_get_bus(kvm, bus_idx);
5953 for (i = 0; i < bus->dev_count; i++) {
5954 if (bus->range[i].dev == dev) {
5959 if (i == bus->dev_count)
5962 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5963 GFP_KERNEL_ACCOUNT);
5965 memcpy(new_bus, bus, struct_size(bus, range, i));
5966 new_bus->dev_count--;
5967 memcpy(new_bus->range + i, bus->range + i + 1,
5968 flex_array_size(new_bus, range, new_bus->dev_count - i));
5971 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5972 synchronize_srcu_expedited(&kvm->srcu);
5979 pr_err("kvm: failed to shrink bus, removing it completely\n");
5992 struct kvm_io_bus *bus;
5993 int dev_idx, srcu_idx;
5996 srcu_idx = srcu_read_lock(&kvm->srcu);
5998 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
6006 iodev = bus->range[dev_idx].dev;
6009 srcu_read_unlock(&kvm->srcu, srcu_idx);
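/*
 * Per-VM debugfs stat plumbing: kvm_debugfs_open()/kvm_stat_data_get()/
 * kvm_stat_data_clear() operate on a stat offset that is interpreted either
 * against the VM-wide kvm->stat or summed/cleared across every vCPU's stat,
 * depending on the descriptor kind.
 */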
6016 int (*get)(void *, u64 *), int (*set)(void *, u64),
6020 struct kvm_stat_data *stat_data = inode->i_private;
6030 ret = simple_attr_open(inode, file, get,
6041 struct kvm_stat_data *stat_data = inode->i_private;
6043 simple_attr_release(inode, file);
6051 *val = *(u64 *)((void *)(&kvm->stat) + offset);
6058 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
6066 struct kvm_vcpu *vcpu;
6070 kvm_for_each_vcpu(i, vcpu, kvm)
6071 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
6079 struct kvm_vcpu *vcpu;
6081 kvm_for_each_vcpu(i, vcpu, kvm)
6082 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6090 struct kvm_stat_data *stat_data = data;
6092 switch (stat_data->kind) {
6095 stat_data->desc->desc.offset, val);
6099 stat_data->desc->desc.offset, val);
6109 struct kvm_stat_data *stat_data = data;
6114 switch (stat_data->kind) {
6117 stat_data->desc->desc.offset);
6121 stat_data->desc->desc.offset);
6130 __simple_attr_check_format("%llu\n", 0ull);
6136 .owner = THIS_MODULE,
6139 .read = simple_attr_read,
6140 .write = simple_attr_write,
6141 .llseek = no_llseek,
6146 unsigned offset = (long)_offset;
6151 mutex_lock(&kvm_lock);
6152 list_for_each_entry(kvm, &vm_list, vm_list) {
6156 mutex_unlock(&kvm_lock);
6162 unsigned offset = (long)_offset;
6168 mutex_lock(&kvm_lock);
6169 list_for_each_entry(kvm, &vm_list, vm_list) {
6172 mutex_unlock(&kvm_lock);
6182 unsigned offset = (long)_offset;
6187 mutex_lock(&kvm_lock);
6188 list_for_each_entry(kvm, &vm_list, vm_list) {
6192 mutex_unlock(&kvm_lock);
6198 unsigned offset = (long)_offset;
6204 mutex_lock(&kvm_lock);
6205 list_for_each_entry(kvm, &vm_list, vm_list) {
6208 mutex_unlock(&kvm_lock);
6219 struct kobj_uevent_env *env;
6220 unsigned long long created, active;
6222 if (!kvm_dev.this_device || !kvm)
6225 mutex_lock(&kvm_lock);
6234 mutex_unlock(&kvm_lock);
6236 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
6240 add_uevent_var(env, "CREATED=%llu", created);
6241 add_uevent_var(env, "COUNT=%llu", active);
6244 add_uevent_var(env, "EVENT=create");
6245 kvm->userspace_pid = task_pid_nr(current);
6247 add_uevent_var(env, "EVENT=destroy");
6249 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6251 if (!IS_ERR(kvm->debugfs_dentry)) {
6252 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
6255 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6257 add_uevent_var(env, "STATS_PATH=%s", tmp);
6262 env->envp[env->envp_idx++] = NULL;
6263 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6269 const struct file_operations *fops;
6270 const struct _kvm_stats_desc *pdesc;
6278 fops = &vm_stat_fops;
6280 fops = &vm_stat_readonly_fops;
6283 (void *)(long)pdesc->desc.offset, fops);
6289 fops = &vcpu_stat_fops;
6291 fops = &vcpu_stat_readonly_fops;
6294 (void *)(long)pdesc->desc.offset, fops);
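/*
 * Preempt notifier hooks: they keep the per-CPU kvm_running_vcpu pointer up
 * to date and record in vcpu->preempted/vcpu->ready whether a vCPU was
 * scheduled out while still runnable, which feeds directed yield.
 */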
6301 return container_of(pn, struct kvm_vcpu, preempt_notifier);
6308 WRITE_ONCE(vcpu->preempted, false);
6309 WRITE_ONCE(vcpu->ready, false);
6311 __this_cpu_write(kvm_running_vcpu, vcpu);
6312 kvm_arch_sched_in(vcpu, cpu);
6317 struct task_struct *next)
6321 if (current->on_rq) {
6322 WRITE_ONCE(vcpu->preempted, true);
6323 WRITE_ONCE(vcpu->ready, true);
6326 __this_cpu_write(kvm_running_vcpu, NULL);
6340 struct kvm_vcpu *vcpu;
6343 vcpu = __this_cpu_read(kvm_running_vcpu);
6355 return &kvm_running_vcpu;
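/*
 * With CONFIG_GUEST_PERF_EVENTS, these callbacks let the host perf
 * subsystem attribute PMI samples to the guest: they report whether the
 * current PMI hit while a vCPU was in guest (and guest-user) context and
 * what the guest instruction pointer was.
 */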
6358 #ifdef CONFIG_GUEST_PERF_EVENTS
6359 static unsigned int kvm_guest_state(void)
6364 if (!kvm_arch_pmi_in_guest(vcpu))
6367 state = PERF_GUEST_ACTIVE;
6369 state |= PERF_GUEST_USER;
6374 static unsigned long kvm_guest_get_ip(void)
6379 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6382 return kvm_arch_vcpu_get_ip(vcpu);
6385 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6386 .state = kvm_guest_state,
6387 .get_ip = kvm_guest_get_ip,
6388 .handle_intel_pt_intr = NULL,
6391 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6393 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6394 perf_register_guest_info_callbacks(&kvm_guest_cbs);
6396 void kvm_unregister_perf_callbacks(void)
6398 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
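/*
 * kvm_init() is called from the arch module's init routine.  Broadly, it
 * wires up the CPU hotplug and syscore hooks for hardware enabling, creates
 * the kvm_vcpu kmem cache and per-CPU kick masks, initialises irqfd and the
 * async-PF machinery, and finally registers the /dev/kvm misc device.
 */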
6402 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6407 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6408 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
6409 kvm_online_cpu, kvm_offline_cpu);
6413 register_syscore_ops(&kvm_syscore_ops);
6418 vcpu_align = __alignof__(struct kvm_vcpu);
6420 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6422 offsetof(struct kvm_vcpu, arch),
6423 offsetofend(struct kvm_vcpu, stats_id)
6424 - offsetof(struct kvm_vcpu, arch),
6428 goto err_vcpu_cache;
6431 for_each_possible_cpu(cpu) {
6432 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6433 GFP_KERNEL, cpu_to_node(cpu))) {
6435 goto err_cpu_kick_mask;
6439 r = kvm_irqfd_init();
6458 if (WARN_ON_ONCE(r))
6469 pr_err("kvm: misc device register failed\n");
6483 for_each_possible_cpu(cpu)
6484 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6487 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6488 unregister_syscore_ops(&kvm_syscore_ops);
6489 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6507 for_each_possible_cpu(cpu)
6508 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6512 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6513 unregister_syscore_ops(&kvm_syscore_ops);
6514 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
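/*
 * VM worker threads: kvm_vm_create_worker_thread() spawns a kthread that
 * first attaches itself to the cgroups and nice level of the task that
 * created the VM, reports the result through the init context, and only
 * then runs the requested thread_fn.
 */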
6536 struct task_struct *parent;
6537 struct kvm *kvm = init_context->kvm;
6538 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6539 uintptr_t data = init_context->data;
6542 err = kthread_park(current);
6548 err = cgroup_attach_task_all(init_context->parent, current);
6550 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6555 set_user_nice(current, task_nice(init_context->parent));
6558 init_context->err = err;
6560 init_context = NULL;
6568 if (!kthread_should_stop())
6569 err = thread_fn(kvm, data);
6584 parent = rcu_dereference(current->real_parent);
6585 get_task_struct(parent);
6587 cgroup_attach_task_all(parent, current);
6588 put_task_struct(parent);
6594 uintptr_t data, const char *name,
6595 struct task_struct **thread_ptr)
6598 struct task_struct *thread;
6601 init_context.kvm = kvm;
6602 init_context.parent = current;
6604 init_context.data = data;
6605 init_completion(&init_context.init_done);
6608 "%s-%d", name, task_pid_nr(current));
6610 return PTR_ERR(thread);
6613 WARN_ON(thread == NULL);
6615 wait_for_completion(&init_context.init_done);
6617 if (!init_context.err)
6618 *thread_ptr = thread;
6620 return init_context.err;