#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include <asm/xen/cpuid.h>
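/*
 * kvm_xen_shared_info_init(): map the shared_info page through a
 * gfn_to_pfn_cache and publish the wall clock to the guest using the
 * pvclock version protocol (version is odd while the fields are being
 * written, even once the update is complete).
 */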
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	/* ... */
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);
		/* ... */
	}

	/* It could be invalid again already, so we need to check */
	read_lock_irq(&gpc->lock);
	/* ... */
	read_unlock_irq(&gpc->lock);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	}
	/* ... */
	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, NSEC_PER_SEC);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);
	srcu_read_unlock(&kvm->srcu, idx);
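/*
 * kvm_xen_inject_timer_irqs(): deliver the guest timer VIRQ for an
 * expiry that was deferred in timer_pending because the fast delivery
 * path would have blocked.
 */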
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
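/*
 * xen_timer_callback() runs in hrtimer (hard IRQ) context, so it may
 * only use the non-sleeping kvm_xen_set_evtchn_fast(). On -EWOULDBLOCK
 * the event is left in timer_pending for later injection from vCPU
 * context.
 */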
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	struct kvm_xen_evtchn e;
	int rc;

	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	e.vcpu_id = vcpu->vcpu_id;
	e.vcpu_idx = vcpu->vcpu_idx;
	e.port = vcpu->arch.xen.timer_virq;
	e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

	rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm);
	if (rc != -EWOULDBLOCK) {
		vcpu->arch.xen.timer_expires = 0;
		return HRTIMER_NORESTART;
	}

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
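/*
 * kvm_xen_start_timer() records the guest's absolute expiry time in
 * timer_expires and arms the hrtimer relative to the current host time.
 */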
	if (vcpu->arch.xen.timer_expires)
		hrtimer_cancel(&vcpu->arch.xen.timer);

	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
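/*
 * kvm_xen_update_runstate_guest(): copy the vCPU's runstate into the
 * guest-provided area. The area may cross a page boundary, in which
 * case two pfn caches (gpc1/gpc2) cover the two halves. If the guest
 * enabled the runstate_update_flag assist, the XEN_RUNSTATE_UPDATE bit
 * (the top bit of state_entry_time) is set while the times are being
 * written so the guest can detect a torn update.
 */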
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	unsigned long flags;
	size_t times_ofs;
	uint8_t *update_bit = NULL;
	uint64_t entry_time;
	uint64_t *rs_times;
	int *rs_state;
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	/* ... */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);

	/*
	 * The state field is an int at the same offset in both the 64-bit
	 * and compat structs, and the same size as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	/*
	 * The XEN_RUNSTATE_UPDATE flag is the top bit of state_entry_time,
	 * which on little-endian x86 lives in the last *byte* of that word.
	 */
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);

	/* state_entry_time immediately precedes the time[] array. */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}
	/*
	 * The runstate area may cross a page boundary: user_len1 is the
	 * amount in the first page, user_len2 the remainder in the second.
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	BUG_ON(user_len1 + user_len2 != user_len);
	if (atomic) {
		/* In atomic context we cannot sleep, so only try to lock. */
		local_irq_save(flags);
		if (!read_trylock(&gpc1->lock)) {
			local_irq_restore(flags);
			return;
		}
	} else {
		read_lock_irqsave(&gpc1->lock, flags);
	}
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);
		/* ... */
		read_lock_irqsave(&gpc1->lock, flags);
	}
	if (likely(!user_len2)) {
		/*
		 * The runstate_info fits in one page, so write it directly
		 * through the first pfn cache.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The area crosses a page boundary: take both GPC locks.
		 * gpc1 > gpc2 is a valid lock ordering; set a lockdep
		 * subclass on gpc1 to keep lockdep happy about the nesting.
		 */
		lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
		if (atomic) {
			if (!read_trylock(&gpc2->lock)) {
				read_unlock_irqrestore(&gpc1->lock, flags);
				return;
			}
		} else {
			read_lock(&gpc2->lock);
		}

		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);
			/* ... */
		}
		rs_times = &rs.state_entry_time;
		/* ... */
		rs_state = ((void *)rs_times) - times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}
	/*
	 * Write the runstate data. In the single-page case this writes
	 * directly into the guest mapping; in the split case it fills the
	 * on-stack struct and then copies it out in two pieces.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));

	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}

	if (user_len2)
		read_unlock(&gpc2->lock);

	read_unlock_irqrestore(&gpc1->lock, flags);
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;

	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
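/*
 * kvm_xen_inject_vcpu_vector(): deliver the per-vCPU upcall vector as a
 * fixed-mode, physical-destination local APIC interrupt.
 */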
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
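/*
 * kvm_xen_inject_pending_events(): fold the shadow evtchn_pending_sel
 * bits accumulated in the kernel into the guest's vcpu_info and set
 * evtchn_upcall_pending. The transfer must be atomic with respect to
 * the guest, hence the LOCK-prefixed or/and sequences below.
 */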
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}
	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);
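/*
 * __kvm_xen_has_interrupt(): read evtchn_upcall_pending from the
 * guest's vcpu_info. This can be called from contexts that may not
 * sleep, so if the pfn cache needs refreshing in atomic context it
 * conservatively reports an interrupt as pending.
 */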
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* We cannot sleep here, so just report the IRQ as pending. */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return 0;

		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->arch.xen.xen_lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->arch.xen.xen_lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;
	mutex_lock(&kvm->arch.xen.xen_lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->arch.xen.xen_lock);
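/*
 * kvm_xen_vcpu_set_attr(): per-vCPU attributes. kvm->srcu is taken
 * around the gfn_to_pfn_cache activations below, since they touch
 * guest memory slots.
 */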
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;

		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			r = 0;
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}

		/*
		 * If the guest switches to 64-bit mode after setting the
		 * runstate address, that's actually OK;
		 * kvm_xen_update_runstate_guest() will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		/* ... */

		/* The second half must start at a page boundary. */
		BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
		sz2 = sz - sz1;
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
				     data->u.gpa + sz1, sz2);
		/* ... */
		break;
	}
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		/*
		 * Capture a consistent snapshot: the timer is either still
		 * pending or already delivered to the shared_info bitmap,
		 * not lurking in timer_pending for deferred delivery.
		 */
		if (vcpu->arch.xen.timer_expires) {
			hrtimer_cancel(&vcpu->arch.xen.timer);
			kvm_xen_inject_timer_irqs(vcpu);
		}

		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;

		/*
		 * Re-arm the timer; asserting an already-pending event
		 * channel is idempotent, so an immediate retrigger is fine.
		 */
		if (vcpu->arch.xen.timer_expires)
			hrtimer_start_expires(&vcpu->arch.xen.timer,
					      HRTIMER_MODE_ABS_HARD);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
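/*
 * kvm_xen_write_hypercall_page(): invoked on writes to the MSR set up
 * by KVM_XEN_HVM_CONFIG. Either generate the hypercall page in-kernel
 * (when intercepting hypercalls) or copy it from the userspace blob.
 */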
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;
		u8 instructions[32];
		int i;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		/* One 32-byte stub per hypercall number, across the page. */
		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);
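/*
 * kvm_xen_hvm_config(): validate the userspace configuration and flip
 * the kvm_xen_enabled static key when MSR interception is enabled or
 * disabled.
 */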
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
			      KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
			      KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
	u32 old_flags;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page, so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->arch.xen.xen_lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	old_flags = kvm->arch.xen_hvm_config.flags;
	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->arch.xen.xen_lock);

	if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);

	return 0;
	kvm_rax_write(vcpu, result);
	struct kvm_run *run = vcpu->run;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}
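/*
 * kvm_xen_schedop_poll(): SCHEDOP_poll halts the vCPU until one of the
 * given event channel ports is pending, or the timeout expires. The
 * 32-bit ABI differs only in the layout of struct sched_poll.
 */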
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	struct x86_exception e;
	int i;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
			*r = -EFAULT;
			return true;
		}

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once converted no further compat handling
		 * is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
					sizeof(sched_poll), &e)) {
			*r = -EFAULT;
			return true;
		}
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else
		ports = &port;

	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
				sched_poll.nr_ports * sizeof(*ports), &e)) {
		*r = -EFAULT;
		goto out;
	}

	for (i = 0; i < sched_poll.nr_ports; i++) {
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding before the interesting part of the structure.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		/* ... */

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		int64_t delta = timeout - guest_now;

		/*
		 * Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50 ns) which would be caused by jiffies
		 * overflow. For those cases, the timeout is set 100ms in
		 * the future instead.
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t)(delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}
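/*
 * kvm_xen_hypercall(): entry point from the intercepted hypercall page.
 * Xen passes hypercall arguments in %rdi..%r9 for 64-bit guests and in
 * %ebx..%ebp for 32-bit guests; anything not handled in-kernel is
 * punted to userspace via KVM_EXIT_XEN.
 */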
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls
	 * that are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0],
						params[1], params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);
handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
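/*
 * kvm_xen_set_evtchn_fast(): deliver an event channel without sleeping.
 * Returns -EWOULDBLOCK if a pfn cache is invalid, in which case the
 * caller falls back to the sleepable kvm_xen_set_evtchn() below.
 */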
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If the port wasn't already set and isn't masked, set the bit in
	 * the target vCPU's pending_sel and kick it to deliver the upcall.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit
			 * in-kernel and prod the vCPU to deliver it itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
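/*
 * kvm_xen_set_evtchn(): sleepable fall-back which may refresh the
 * shared_info cache (borrowing kvm->mm when called from a workqueue)
 * and then retries the fast path.
 */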
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	mutex_lock(&kvm->arch.xen.xen_lock);

	/*
	 * The shared_info page may have been invalidated by the MMU
	 * notifier in the meantime; refresh it and retry the fast path
	 * until delivery succeeds or the refresh fails.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->arch.xen.xen_lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Cache the result here.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
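/*
 * kvm_xen_hvm_evtchn_send(): direct injection of an event channel from
 * userspace via the KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */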
	struct kvm_xen_evtchn e;
	int ret;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
		struct kvm_xen_evtchn port;
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;
	int ret;

	/* Protect writes to evtchnfd as well as the idr lookup. */
	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);

	ret = -ENOENT;
	if (!evtchnfd)
		goto out_unlock;

	/* ... */

	/* We only support 2 level event channels for now */
	ret = -EINVAL;
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		goto out_unlock;

	/* ... */

out_unlock:
	mutex_unlock(&kvm->arch.xen.xen_lock);
	return ret;
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd;
	int ret = -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL */

		/* ... */
	}

	mutex_lock(&kvm->arch.xen.xen_lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->arch.xen.xen_lock);
	/* ... */
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->arch.xen.xen_lock);

	if (!evtchnfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int i;
	int n = 0;

	mutex_lock(&kvm->arch.xen.xen_lock);

	/*
	 * Because synchronize_srcu() cannot be called inside the
	 * critical section, first collect all the evtchnfd objects in
	 * an array as they are removed from evtchn_ports.
	 */
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
		n++;

	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
	if (!all_evtchnfds) {
		mutex_unlock(&kvm->arch.xen.xen_lock);
		return -ENOMEM;
	}

	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
	}
	mutex_unlock(&kvm->arch.xen.xen_lock);

	synchronize_srcu(&kvm->srcu);

	while (n--) {
		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	kfree(all_evtchnfds);

	return 0;
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	struct x86_exception e;

	/* Sanity check: this structure is the same for 32-bit and 64-bit */
	BUILD_BUG_ON(sizeof(send) != 4);
	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
		*r = -EFAULT;
		return true;
	}

	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
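/*
 * Per-vCPU initialisation, teardown and CPUID leaves follow. A vCPU's
 * default Xen vcpu_id matches its index; userspace may override it via
 * the VCPU_ID attribute before setting up timers or event channels.
 */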
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	del_timer_sync(&vcpu->arch.xen.poll_timer);
	struct kvm_cpuid_entry2 *entry;
	u32 function;

	if (!vcpu->arch.xen.cpuid.base)
		return;

	function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3);
	if (function > vcpu->arch.xen.cpuid.limit)
		return;

	entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
	if (entry) {
		entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
		entry->edx = vcpu->arch.hv_clock.tsc_shift;
	}

	entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
	if (entry)
		entry->eax = vcpu->arch.hw_tsc_khz;
	mutex_init(&kvm->arch.xen.xen_lock);
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);