#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"        /* is_64_bit_hypercall(), kvm_rcx_read()/kvm_rax_read() */

#ifdef CONFIG_KVM_HYPERV
/* "Hv#1" hypervisor interface identification signature */
#define HYPERV_CPUID_SIGNATURE_EAX                      0x31237648

/* Synthetic debugger CPUID leaves */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS    0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE                   0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES       0x40000082

/* Synthetic debugger platform capabilities (EAX of the capabilities leaf) */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING        BIT(1)

/* Synthetic debugger MSRs */
#define HV_X64_MSR_SYNDBG_CONTROL                       0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS                        0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER                   0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER                   0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER                0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS                       0x400000FF

/* HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS                 BIT(2)
static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
        return &kvm->arch.hyperv;
}
static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hyperv;
}
static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        return &hv_vcpu->synic;
}
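/*
 * The reverse mapping: a SynIC pointer is always embedded in a struct
 * kvm_vcpu_hv, so container_of() recovers the enclosing per-vCPU Hyper-V
 * state and, from it, the owning vCPU.
 */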
static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
        struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

        return hv_vcpu->vcpu;
}
static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
        return &vcpu->kvm->arch.hyperv.hv_syndbg;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
        return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
        return to_hv_vcpu(vcpu) &&
               test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
}
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
                                                       int timer_index)
{
        return &to_hv_vcpu(vcpu)->stimer[timer_index];
}
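/*
 * stimer points at one element of the hv_vcpu->stimer[] array; subtracting
 * its own index yields &stimer[0], from which container_of() recovers the
 * enclosing struct kvm_vcpu_hv and thus the owning vCPU.
 */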
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu_hv *hv_vcpu;

        hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
                               stimer[0]);
        return hv_vcpu->vcpu;
}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        if (!hv_vcpu)
                return false;

        return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
                             HV_SYNIC_STIMER_COUNT);
}
/*
 * With the HV_ACCESS_TSC_INVARIANT privilege, the invariant TSC CPUID bit is
 * only exposed to the guest once HV_EXPOSE_INVARIANT_TSC has been set in
 * HV_X64_MSR_TSC_INVARIANT_CONTROL.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        /* Nothing to suppress if the guest wasn't given HV_ACCESS_TSC_INVARIANT. */
        if (!hv_vcpu ||
            !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
                return false;

        /* Suppress invariant TSC until the guest opts in via the control MSR. */
        return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                     struct kvm_cpuid_entry2 __user *entries);
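/*
 * Each Hyper-V vCPU keeps two TLB-flush request FIFOs, one for flushes
 * targeting L1 (HV_L1_TLB_FLUSH_FIFO) and one for flushes targeting L2
 * (HV_L2_TLB_FLUSH_FIFO); the active one is picked based on whether the
 * vCPU is currently running a nested guest.
 */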
static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
                                                                            bool is_guest_mode)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
                                HV_L1_TLB_FLUSH_FIFO;

        return &hv_vcpu->tlb_flush_fifo[i];
}
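/*
 * Consume a pending KVM_REQ_HV_TLB_FLUSH and drop the GVAs queued for the
 * current (L1 or L2) context, typically because the caller is about to
 * flush the entire TLB anyway.
 */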
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

        if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
                return;

        tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

        kfifo_reset_out(&tlb_flush_fifo->entries);
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        return hv_vcpu &&
               (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}
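/*
 * Per the hypercall ABI, the call code is taken from RCX for 64-bit
 * hypercalls and from EAX for 32-bit ones; match it against the four
 * TLB-flush hypercall codes.
 */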
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        u16 code;

        if (!hv_vcpu)
                return false;

        code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
                                           kvm_rax_read(vcpu);

        return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
                code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
                code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
                code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
        if (!to_hv_vcpu(vcpu))
                return 0;

        if (!kvm_hv_assist_page_enabled(vcpu))
                return 0;

        return kvm_hv_get_assist_page(vcpu);
}

static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled)
{
        /*
         * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's or L2's VP_ID
         * upon request from the guest; such requests are posted by other
         * vCPUs asynchronously, so make sure the right FIFO is checked for
         * pending entries on every L1/L2 transition.
         */
        if (to_hv_vcpu(vcpu) && tdp_enabled)
                kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
}
/* Externally visible Hyper-V emulation entry points, implemented in hyperv.c. */
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);
void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
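/*
 * When CONFIG_KVM_HYPERV is disabled, provide no-op/failing stubs so that
 * callers elsewhere in KVM don't need to be wrapped in #ifdefs.
 */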
#else   /* CONFIG_KVM_HYPERV */

static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
                                         struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        return HV_STATUS_ACCESS_DENIED;
}
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
        return false;
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
        return false;
}
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
        return vcpu->vcpu_idx;
}
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}

#endif  /* CONFIG_KVM_HYPERV */

#endif  /* __ARCH_X86_KVM_HYPERV_H__ */