// SPDX-License-Identifier: GPL-2.0
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

/* The gfn range handed to the Hyper-V flush hypercall helpers. */
struct kvm_hv_tlb_range {
	u64 start_gfn;
	u64 pages;
};
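/*
 * Callback used by hyperv_flush_guest_mapping_range() to pack the gfn
 * range being flushed into the Hyper-V flush list.
 */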
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
				       void *data)
{
	struct kvm_hv_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}
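/*
 * Flush a single TDP root: use the ranged hypercall when a specific gfn
 * range is provided, else flush the root's entire guest address space.
 */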
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_hv_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}
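/*
 * Flush the TLB entries of every vCPU's root.  If all vCPUs share a single
 * root, it is tracked in kvm_arch->hv_root_tdp and one flush suffices;
 * otherwise each unique valid root is flushed in turn.
 */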
static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
					struct kvm_hv_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;
	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;
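		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root directly.
		 */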
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;
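			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */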
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}
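		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots.
		 */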
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
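/*
 * Track the vCPU's current TDP root so that a root common to all vCPUs can
 * be flushed with a single hypercall; any divergence invalidates the
 * tracked common root.
 */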
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	/* Only track roots when KVM is actually using the Hyper-V flush hooks. */
	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	struct kvm_hv_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = nr_pages,
	};

	return __hv_flush_remote_tlbs_range(kvm, &range);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);

int hv_flush_remote_tlbs(struct kvm *kvm)
{
	/* A NULL range flushes the entirety of every root. */
	return __hv_flush_remote_tlbs_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);