KVM
Functions | Variables
async_pf.c File Reference
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include "async_pf.h"
#include <trace/events/kvm.h>
Include dependency graph for async_pf.c:

Go to the source code of this file.

Functions

int kvm_async_pf_init (void)
 
void kvm_async_pf_deinit (void)
 
void kvm_async_pf_vcpu_init (struct kvm_vcpu *vcpu)
 
static void async_pf_execute (struct work_struct *work)
 
static void kvm_flush_and_free_async_pf_work (struct kvm_async_pf *work)
 
void kvm_clear_async_pf_completion_queue (struct kvm_vcpu *vcpu)
 
void kvm_check_async_pf_completion (struct kvm_vcpu *vcpu)
 
bool kvm_setup_async_pf (struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, unsigned long hva, struct kvm_arch_async_pf *arch)
 
int kvm_async_pf_wakeup_all (struct kvm_vcpu *vcpu)
 

Variables

static struct kmem_cache * 	async_pf_cache
 

Function Documentation

◆ async_pf_execute()

static void async_pf_execute (struct work_struct *work)

Definition at line 45 of file async_pf.c.

46 {
47  struct kvm_async_pf *apf =
48  container_of(work, struct kvm_async_pf, work);
49  struct mm_struct *mm = apf->mm;
50  struct kvm_vcpu *vcpu = apf->vcpu;
51  unsigned long addr = apf->addr;
52  gpa_t cr2_or_gpa = apf->cr2_or_gpa;
53  int locked = 1;
54  bool first;
55 
56  might_sleep();
57 
58  /*
59  * This work is run asynchronously to the task which owns
60  * mm and might be done in another context, so we must
61  * access remotely.
62  */
63  mmap_read_lock(mm);
64  get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
65  if (locked)
66  mmap_read_unlock(mm);
67 
68  if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
69  kvm_arch_async_page_present(vcpu, apf);
70 
71  spin_lock(&vcpu->async_pf.lock);
72  first = list_empty(&vcpu->async_pf.done);
73  list_add_tail(&apf->link, &vcpu->async_pf.done);
74  apf->vcpu = NULL;
75  spin_unlock(&vcpu->async_pf.lock);
76 
77  if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
78  kvm_arch_async_page_present_queued(vcpu);
79 
80  /*
81  * apf may be freed by kvm_check_async_pf_completion() after
82  * this point
83  */
84 
85  trace_kvm_async_pf_completed(addr, cr2_or_gpa);
86 
87  __kvm_vcpu_wake_up(vcpu);
88 
89  mmput(mm);
90 }
Here is the caller graph for this function:
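
The container_of() recovery at the top of async_pf_execute() is the standard kernel pattern for workqueue callbacks: the work_struct is embedded in the larger object, and the handler recovers the enclosing object from the member pointer it is handed. A minimal stand-alone sketch (toy types, not KVM code):

#include <linux/container_of.h>
#include <linux/workqueue.h>

/* Toy stand-in for struct kvm_async_pf: the work_struct is embedded. */
struct toy_async_pf {
	struct work_struct work;
	unsigned long addr;
};

static void toy_execute(struct work_struct *work)
{
	/* Recover the enclosing object from the embedded member. */
	struct toy_async_pf *apf =
		container_of(work, struct toy_async_pf, work);

	(void)apf->addr;	/* ... fault in apf->addr, etc. ... */
}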

◆ kvm_async_pf_deinit()

void kvm_async_pf_deinit (void)

Definition at line 32 of file async_pf.c.

33 {
34  kmem_cache_destroy(async_pf_cache);
35  async_pf_cache = NULL;
36 }
Here is the caller graph for this function:

◆ kvm_async_pf_init()

int kvm_async_pf_init (void)

Definition at line 22 of file async_pf.c.

23 {
24  async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
25 
26  if (!async_pf_cache)
27  return -ENOMEM;
28 
29  return 0;
30 }
Here is the caller graph for this function:
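
For context, a hedged sketch of how the init/deinit pair is meant to be used (simplified; in the real tree the callers are kvm_init() and kvm_exit() in virt/kvm/kvm_main.c):

static int __init example_module_init(void)
{
	int r;

	r = kvm_async_pf_init();	/* create async_pf_cache */
	if (r)
		return r;
	/* ... remaining subsystem init ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ... remaining subsystem teardown ... */
	kvm_async_pf_deinit();		/* destroy the cache, NULL the pointer */
}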

◆ kvm_async_pf_vcpu_init()

void kvm_async_pf_vcpu_init (struct kvm_vcpu *vcpu)

Definition at line 38 of file async_pf.c.

39 {
40  INIT_LIST_HEAD(&vcpu->async_pf.done);
41  INIT_LIST_HEAD(&vcpu->async_pf.queue);
42  spin_lock_init(&vcpu->async_pf.lock);
43 }
Here is the caller graph for this function:

◆ kvm_async_pf_wakeup_all()

int kvm_async_pf_wakeup_all (struct kvm_vcpu *vcpu)

Definition at line 223 of file async_pf.c.

224 {
225  struct kvm_async_pf *work;
226  bool first;
227 
228  if (!list_empty_careful(&vcpu->async_pf.done))
229  return 0;
230 
231  work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
232  if (!work)
233  return -ENOMEM;
234 
235  work->wakeup_all = true;
236  INIT_LIST_HEAD(&work->queue); /* for list_del to work */
237 
238  spin_lock(&vcpu->async_pf.lock);
239  first = list_empty(&vcpu->async_pf.done);
240  list_add_tail(&work->link, &vcpu->async_pf.done);
241  spin_unlock(&vcpu->async_pf.lock);
242 
243  if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
244  kvm_arch_async_page_present_queued(vcpu);
245 
246  vcpu->async_pf.queued++;
247  return 0;
248 }
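
Note the first = list_empty(...) check taken under the lock before list_add_tail(): the arch is notified only when the completion list transitions from empty to non-empty, and later insertions piggyback on the already-pending notification. A stand-alone sketch of the pattern (toy code, not KVM's):

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(done_list);
static DEFINE_SPINLOCK(done_lock);

static void push_and_maybe_notify(struct list_head *entry,
				  void (*notify)(void))
{
	bool first;

	spin_lock(&done_lock);
	first = list_empty(&done_list);		/* empty -> non-empty? */
	list_add_tail(entry, &done_list);
	spin_unlock(&done_lock);

	if (first)
		notify();	/* one notification drains the whole list */
}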

◆ kvm_check_async_pf_completion()

void kvm_check_async_pf_completion (struct kvm_vcpu *vcpu)

Definition at line 158 of file async_pf.c.

159 {
160  struct kvm_async_pf *work;
161 
162  while (!list_empty_careful(&vcpu->async_pf.done) &&
163  kvm_arch_can_dequeue_async_page_present(vcpu)) {
164  spin_lock(&vcpu->async_pf.lock);
165  work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
166  link);
167  list_del(&work->link);
168  spin_unlock(&vcpu->async_pf.lock);
169 
170  kvm_arch_async_page_ready(vcpu, work);
171  if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
172  kvm_arch_async_page_present(vcpu, work);
173 
174  list_del(&work->queue);
175  vcpu->async_pf.queued--;
176  kvm_flush_and_free_async_pf_work(work);
177  }
178 }
Here is the call graph for this function:
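
kvm_check_async_pf_completion() is intended to be polled from the vCPU run loop so completed faults are delivered before each guest (re)entry. A hedged sketch of a plausible call site (loosely modeled on x86's vcpu_run() in arch/x86/kvm/x86.c; the surrounding loop is illustrative):

static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	int r = 1;

	while (r > 0) {
		/* r = vcpu_enter_guest(vcpu);  enter guest, handle exit */
		kvm_check_async_pf_completion(vcpu);	/* drain done list */
		if (signal_pending(current))
			r = -EINTR;
	}
	return r;
}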

◆ kvm_clear_async_pf_completion_queue()

void kvm_clear_async_pf_completion_queue (struct kvm_vcpu *vcpu)

Definition at line 113 of file async_pf.c.

114 {
115  spin_lock(&vcpu->async_pf.lock);
116 
117  /* cancel outstanding work queue item */
118  while (!list_empty(&vcpu->async_pf.queue)) {
119  struct kvm_async_pf *work =
120  list_first_entry(&vcpu->async_pf.queue,
121  typeof(*work), queue);
122  list_del(&work->queue);
123 
124  /*
125  * We know it's present in vcpu->async_pf.done, do
126  * nothing here.
127  */
128  if (!work->vcpu)
129  continue;
130 
131  spin_unlock(&vcpu->async_pf.lock);
132 #ifdef CONFIG_KVM_ASYNC_PF_SYNC
133  flush_work(&work->work);
134 #else
135  if (cancel_work_sync(&work->work)) {
136  mmput(work->mm);
137  kmem_cache_free(async_pf_cache, work);
138  }
139 #endif
140  spin_lock(&vcpu->async_pf.lock);
141  }
142 
143  while (!list_empty(&vcpu->async_pf.done)) {
144  struct kvm_async_pf *work =
145  list_first_entry(&vcpu->async_pf.done,
146  typeof(*work), link);
147  list_del(&work->link);
148 
149  spin_unlock(&vcpu->async_pf.lock);
150  kvm_flush_and_free_async_pf_work(work);
151  spin_lock(&vcpu->async_pf.lock);
152  }
153  spin_unlock(&vcpu->async_pf.lock);
154 
155  vcpu->async_pf.queued = 0;
156 }
Here is the call graph for this function:
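
The #else branch above relies on the cancel_work_sync() contract: it returns true only when the work item was still pending, i.e. the handler never ran, in which case the caller must drop the references async_pf_execute() would otherwise have dropped; if the handler is already running, cancel_work_sync() waits for it to finish and returns false. A toy sketch of that contract (not KVM code):

#include <linux/sched/mm.h>
#include <linux/workqueue.h>

static void cancel_or_wait(struct work_struct *w, struct mm_struct *mm)
{
	if (cancel_work_sync(w)) {
		/* Work was still pending: the handler never ran, so drop
		 * the mm reference it would otherwise have put. */
		mmput(mm);
	}
	/* Otherwise the handler ran (cancel_work_sync() waited for it)
	 * and already performed its own cleanup. */
}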

◆ kvm_flush_and_free_async_pf_work()

static void kvm_flush_and_free_async_pf_work (struct kvm_async_pf *work)

Definition at line 92 of file async_pf.c.

93 {
94  /*
95  * The async #PF is "done", but KVM must wait for the work item itself,
96  * i.e. async_pf_execute(), to run to completion. If KVM is a module,
97  * KVM must ensure *no* code owned by KVM (the module) can be run
98  * after the last call to module_put(). Note, flushing the work item
99  * is always required when the item is taken off the completion queue.
100  * E.g. even if the vCPU handles the item in the "normal" path, the VM
101  * could be terminated before async_pf_execute() completes.
102  *
103  * Wake-all events skip the queue and go straight to done, i.e. they don't
104  * need to be flushed (but sanity check that the work wasn't queued).
105  */
106  if (work->wakeup_all)
107  WARN_ON_ONCE(work->work.func);
108  else
109  flush_work(&work->work);
110  kmem_cache_free(async_pf_cache, work);
111 }
Here is the caller graph for this function:
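
The wakeup_all special case works because kvm_async_pf_wakeup_all() allocates the item zeroed and never passes it to INIT_WORK()/schedule_work(), so work.func stays NULL and there is no handler to flush; WARN_ON_ONCE(work->work.func) encodes exactly that assumption. A toy illustration (not KVM code):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct toy_item {
	struct work_struct work;
};

static bool was_never_scheduled(const struct toy_item *it)
{
	/* kzalloc() left work.func NULL; INIT_WORK() would have set it. */
	return !it->work.func;
}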

◆ kvm_setup_async_pf()

bool kvm_setup_async_pf (struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, unsigned long hva, struct kvm_arch_async_pf *arch)

Definition at line 184 of file async_pf.c.

186 {
187  struct kvm_async_pf *work;
188 
189  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
190  return false;
191 
192  /* Arch-specific code should not do async PF in this case */
193  if (unlikely(kvm_is_error_hva(hva)))
194  return false;
195 
196  /*
197  * Do the allocation nowait: if we are going to sleep anyway, we
198  * may as well sleep faulting the page in synchronously.
199  */
200  work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
201  if (!work)
202  return false;
203 
204  work->wakeup_all = false;
205  work->vcpu = vcpu;
206  work->cr2_or_gpa = cr2_or_gpa;
207  work->addr = hva;
208  work->arch = *arch;
209  work->mm = current->mm;
210  mmget(work->mm);
211 
212  INIT_WORK(&work->work, async_pf_execute);
213 
214  list_add_tail(&work->queue, &vcpu->async_pf.queue);
215  vcpu->async_pf.queued++;
216  work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
217 
218  schedule_work(&work->work);
219 
220  return true;
221 }
Here is the call graph for this function:
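
A hedged sketch of an arch-side caller (loosely modeled on x86's kvm_arch_setup_async_pf() in arch/x86/kvm/mmu/mmu.c; the kvm_arch_async_pf contents are arch-specific, so the commented field is an illustrative assumption):

static bool example_arch_setup_async_pf(struct kvm_vcpu *vcpu,
					gpa_t cr2_or_gpa, gfn_t gfn)
{
	struct kvm_arch_async_pf arch = { };

	/* arch.gfn = gfn;  -- illustrative arch-specific bookkeeping */

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}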

Variable Documentation

◆ async_pf_cache

static struct kmem_cache *async_pf_cache

Definition at line 20 of file async_pf.c.