#include <linux/slab.h>
#include <linux/module.h>
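/* One slab cache backs every struct kvm_async_pf allocated below. */
static struct kmem_cache *async_pf_cache;

/* kvm_async_pf_deinit(): destroy the cache again on module unload. */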
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
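/*
 * kvm_async_pf_vcpu_init(): "queue" tracks every async fault still
 * outstanding for this vcpu; completed faults move to "done", which
 * the lock initialized below protects.
 */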
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
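/*
 * async_pf_execute(): the workqueue callback; runs in process context
 * so it may sleep while faulting the page in on the guest's behalf.
 */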
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);

	/*
	 * ... vcpu, addr, gva and page are taken from apf, then the
	 * page is faulted in via get_user_pages() under apf->mm ...
	 */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->page = page;
	apf->done = true;
	spin_unlock(&vcpu->async_pf.lock);

	trace_kvm_async_pf_completed(addr, page, gva);

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
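/*
 * kvm_clear_async_pf_completion_queue(): run on vcpu reset/teardown.
 * First cancel work items that are still pending, then release the
 * completed ones along with the page references their workers took.
 */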
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_entry(vcpu->async_pf.queue.next,
				   typeof(*work), queue);
		cancel_work_sync(&work->work);
		list_del(&work->queue);
		if (!work->done) /* work was canceled before it ran */
			kmem_cache_free(async_pf_cache, work);
	}

	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_entry(vcpu->async_pf.done.next,
				   typeof(*work), link);
		list_del(&work->link);
		if (!is_error_page(work->page))
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
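/*
 * kvm_check_async_pf_completion(): called from the vcpu loop to drain
 * the done list and let arch code inject "page ready" into the guest.
 */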
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		if (work->page)
			kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		if (!is_error_page(work->page))
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
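/*
 * kvm_setup_async_pf(): queue an async fault for gva/gfn rather than
 * blocking the vcpu; returns 1 once the work item has been scheduled,
 * 0 if the caller must fall back to a synchronous fault.
 */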
	struct kvm_async_pf *work;

	/* ... bail out if ASYNC_PF_PER_VCPU faults are already queued ... */

	/* do alloc nowait since if we are going to sleep anyway we
	   may as well sleep faster */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
	if (!work)
		return 0;

	/* ... work->gva, work->addr (the hva of gfn), work->arch etc. are
	   set up, taking references on current->mm and the kvm instance ... */

	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;	/* undo setup; caller faults synchronously */

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
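/*
 * kvm_async_pf_wakeup_all(): queue a dummy completed entry so a guest
 * that is waiting on async faults receives a broadcast wakeup.
 */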
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;	/* a completion is already pending */

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->page = bad_page;		/* error-page sentinel: no real fault */
	INIT_LIST_HEAD(&work->queue);	/* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
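/*
 * Because bad_page fails the is_error_page() checks above, consumers
 * free this dummy entry without calling put_page() on a real page.
 */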