#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/* MMU notifier 'invalidate_range_start' hook. */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool evict_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page, so no need to care about the length. */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * A guest vCPU could be using the physical address,
			 * so it needs to be forced out of guest mode.
			 */
			if (gpc->usage & KVM_GUEST_USES_PFN) {
				if (!evict_vcpus) {
					evict_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (evict_vcpus) {
		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
		bool called;

		/* Can't wait for vCPUs if the caller may not block. */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
		WARN_ON_ONCE(called && !may_block);
	}
}

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gpc_check);
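
/*
 * Illustrative sketch, not part of the original file: the intended consumer
 * pattern for kvm_gpc_check()/kvm_gpc_refresh(), modelled on existing
 * callers.  Cache contents are only dereferenced under gpc->lock held for
 * read; when the check fails, the lock is dropped, the cache is refreshed,
 * and the check is retried.  The function and payload names are hypothetical.
 */
struct example_payload {
	u64 field;
};

static u64 __maybe_unused example_read_cached_field(struct gfn_to_pfn_cache *gpc)
{
	struct example_payload *p;
	unsigned long flags;
	u64 val;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*p))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-resolve the gpa=>pfn mapping; fails if the gpa is gone. */
		if (kvm_gpc_refresh(gpc, sizeof(*p)))
			return 0;

		read_lock_irqsave(&gpc->lock, flags);
	}

	p = gpc->khva;
	val = READ_ONCE(p->field);
	read_unlock_irqrestore(&gpc->lock, flags);

	return val;
}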

static void gpc_unmap_khva(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (!is_error_noslot_pfn(pfn) && khva) {
		if (pfn_valid(pfn))
			kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
		else
			memunmap(khva);
#endif
	}
}

static bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes like
	 * mmu_invalidate_in_progress here, but the latter cannot be used:
	 * caches are invalidated _before_ mmu_invalidate_in_progress is
	 * elevated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq; this pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end().
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}

static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);
	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache before dropping gpc->lock so that a concurrent
	 * check() cannot succeed mid-refresh.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * Clean up after an iteration lost to an mmu_notifier event;
		 * unmapping might sleep, so it must happen unlocked.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/* Keep the mapping if it was simply reused. */
			if (new_khva != old_khva)
				gpc_unmap_khva(new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);
			cond_resched();
		}

		/* We always request a writable mapping. */
		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/* kmap() and memremap() can sleep: map outside of gpc->lock. */
		if (gpc->usage & KVM_HOST_USES_PFN) {
			if (new_pfn == gpc->pfn) {
				new_khva = old_khva;
			} else if (pfn_valid(new_pfn)) {
				new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
			} else {
				new_khva = memremap(pfn_to_hpa(new_pfn),
						    PAGE_SIZE, MEMREMAP_WB);
#endif
			}
			if (!new_khva) {
				kvm_release_pfn_clean(new_pfn);
				goto out_error;
			}
		}

		write_lock_irq(&gpc->lock);

		/* Other tasks must wait for _this_ refresh to complete. */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

	/*
	 * Put the reference to the _new_ pfn; the cache now tracks it and
	 * will invalidate the mapping on relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);
	return -EFAULT;
}

static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
			     unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	void *old_khva;
	int ret;

	/* The cached mapping must fit within a single page. */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	/* Concurrent refreshes must be fully serialized. */
	mutex_lock(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;

	/* If the userspace HVA is invalid, refresh that first. */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and redo the HVA=>PFN lookup.
	 */
	if (!gpc->valid || old_uhva != gpc->uhva) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/* The mapping is still valid, just update the offset. */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed;
	 * the uhva, gpa, and memslot generation info may still be valid.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	mutex_unlock(&gpc->refresh_lock);

	if (unmap_old)
		gpc_unmap_khva(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
{
	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);

	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->vcpu = vcpu;
	gpc->usage = usage;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->uhva = KVM_HVA_ERR_BAD;
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list; a
		 * concurrent refresh must not establish a mapping until the
		 * cache is reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
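
/*
 * Illustrative sketch, not part of the original file: bringing up a cache
 * for host-side access to a hypothetical, page-aligned gpa.  With
 * KVM_HOST_USES_PFN only the kernel mapping (gpc->khva) is used, so no vcpu
 * needs to be supplied to kvm_gpc_init().
 */
static int __maybe_unused example_setup_cache(struct kvm *kvm,
					      struct gfn_to_pfn_cache *gpc,
					      gpa_t gpa)
{
	kvm_gpc_init(gpc, kvm, NULL, KVM_HOST_USES_PFN);

	/* Resolves gpa to a pfn and maps it; fails if gpa has no backing. */
	return kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
}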

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list so
		 * that a concurrent refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA map intact; it's protected by the
		 * memslot generation.  The PFN lookup must be redone every
		 * time, as mmu_notifier protection is lost once the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap_khva(old_pfn, old_khva);
	}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
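
/*
 * Illustrative sketch, not part of the original file: teardown must happen
 * before the structure embedding the cache is freed.  Until
 * kvm_gpc_deactivate() takes the cache off kvm->gpc_list, the mmu_notifier
 * path in gfn_to_pfn_cache_invalidate_start() may still walk into it.
 */
static void __maybe_unused example_teardown_cache(struct gfn_to_pfn_cache *gpc)
{
	kvm_gpc_deactivate(gpc);
	/* Only now is it safe to free the object that embeds *gpc. */
}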