#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}
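
/*
 * Note: the reserved entries keep a margin of free slots in the ring.
 * kvm_dirty_ring_alloc() subtracts them from the entry count to form
 * ring->soft_limit, the fill level at which the vCPU is forced out to
 * userspace so the ring can be harvested before it overflows.
 */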
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}
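
/*
 * Writes to guest memory that happen without a running vCPU have no
 * ring to push to; by default they are disallowed. Architectures that
 * select CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP supply their own
 * definition and fall back to the dirty bitmap for such writes.
 */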
#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif
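
/*
 * dirty_index and reset_index only ever grow; the unsigned subtraction
 * below yields the number of occupied entries even after the u32
 * counters wrap around.
 */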
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}
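
/*
 * The u32 "slot" argument packs the address space id in the high 16
 * bits and the memslot id in the low 16 bits; it is unpacked below
 * before the memslot is looked up.
 */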
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}
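
/*
 * Producer and consumer positions are masked with (ring->size - 1), so
 * the caller is expected to pass a size that holds a power-of-two
 * number of struct kvm_dirty_gfn entries.
 */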
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}
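
/*
 * Entry life cycle: the kernel publishes an entry by setting
 * KVM_DIRTY_GFN_F_DIRTY, userspace harvests it by setting
 * KVM_DIRTY_GFN_F_RESET, and the reset path below clears the flags
 * again (with release semantics) before the slot is reused.
 */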
static void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}
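
/*
 * Walk the ring from reset_index, collect every entry userspace has
 * harvested, coalesce neighbouring offsets in the same slot into a
 * BITS_PER_LONG-wide mask, re-protect those pages, and return the
 * number of entries recycled.
 */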
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest
		 * is scanning pages in the same memslot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	trace_kvm_dirty_ring_reset(ring);

	return count;
}
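
/*
 * Push one dirty gfn into the ring. The ring must never be completely
 * full here: the soft-full request below is meant to force the vCPU
 * out to userspace while reserved entries still remain.
 */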
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}
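
/*
 * Returns true when the vCPU must exit to userspace with
 * KVM_EXIT_DIRTY_RING_FULL so the ring can be harvested and reset.
 */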
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The vCPU isn't runnable once the dirty ring becomes soft
	 * full. Keep the request set until userspace has harvested
	 * the dirty pages and reset the ring.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}
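
/*
 * Resolve one page of the vzalloc'ed ring ("offset" is in pages from
 * the ring base), presumably so the page can be mapped into userspace.
 */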
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}