Skip to content

Commit f2e1066

Browse files
chai wen authored and Gleb Natapov committed
KVM: Drop FOLL_GET in GUP when doing async page fault
Page pinning is not mandatory in kvm async page fault processing since after async page fault event is delivered to a guest it accesses page once again and does its own GUP. Drop the FOLL_GET flag in GUP in async_pf code, and do some simplifying in check/clear processing. Suggested-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com> Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
1 parent a7efdf6 commit f2e1066

File tree

4 files changed

+12
-21
lines changed

4 files changed

+12
-21
lines changed

arch/x86/kvm/x86.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7298,7 +7298,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
72987298
int r;
72997299

73007300
if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
7301-
is_error_page(work->page))
7301+
work->wakeup_all)
73027302
return;
73037303

73047304
r = kvm_mmu_reload(vcpu);
@@ -7408,7 +7408,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
74087408
struct x86_exception fault;
74097409

74107410
trace_kvm_async_pf_ready(work->arch.token, work->gva);
7411-
if (is_error_page(work->page))
7411+
if (work->wakeup_all)
74127412
work->arch.token = ~0; /* broadcast wakeup */
74137413
else
74147414
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

include/linux/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ struct kvm_async_pf {
189189
gva_t gva;
190190
unsigned long addr;
191191
struct kvm_arch_async_pf arch;
192-
struct page *page;
192+
bool wakeup_all;
193193
};
194194

195195
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);

include/trace/events/kvm.h

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
296296

297297
TRACE_EVENT(
298298
kvm_async_pf_completed,
299-
TP_PROTO(unsigned long address, struct page *page, u64 gva),
300-
TP_ARGS(address, page, gva),
299+
TP_PROTO(unsigned long address, u64 gva),
300+
TP_ARGS(address, gva),
301301

302302
TP_STRUCT__entry(
303303
__field(unsigned long, address)
304-
__field(pfn_t, pfn)
305304
__field(u64, gva)
306305
),
307306

308307
TP_fast_assign(
309308
__entry->address = address;
310-
__entry->pfn = page ? page_to_pfn(page) : 0;
311309
__entry->gva = gva;
312310
),
313311

314-
TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
315-
__entry->address, __entry->pfn)
312+
TP_printk("gva %#llx address %#lx", __entry->gva,
313+
__entry->address)
316314
);
317315

318316
#endif

virt/kvm/async_pf.c

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
5656

5757
static void async_pf_execute(struct work_struct *work)
5858
{
59-
struct page *page = NULL;
6059
struct kvm_async_pf *apf =
6160
container_of(work, struct kvm_async_pf, work);
6261
struct mm_struct *mm = apf->mm;
@@ -68,21 +67,20 @@ static void async_pf_execute(struct work_struct *work)
6867

6968
use_mm(mm);
7069
down_read(&mm->mmap_sem);
71-
get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
70+
get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
7271
up_read(&mm->mmap_sem);
7372
unuse_mm(mm);
7473

7574
spin_lock(&vcpu->async_pf.lock);
7675
list_add_tail(&apf->link, &vcpu->async_pf.done);
77-
apf->page = page;
7876
spin_unlock(&vcpu->async_pf.lock);
7977

8078
/*
8179
* apf may be freed by kvm_check_async_pf_completion() after
8280
* this point
8381
*/
8482

85-
trace_kvm_async_pf_completed(addr, page, gva);
83+
trace_kvm_async_pf_completed(addr, gva);
8684

8785
if (waitqueue_active(&vcpu->wq))
8886
wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
112110
list_entry(vcpu->async_pf.done.next,
113111
typeof(*work), link);
114112
list_del(&work->link);
115-
if (!is_error_page(work->page))
116-
kvm_release_page_clean(work->page);
117113
kmem_cache_free(async_pf_cache, work);
118114
}
119115
spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
133129
list_del(&work->link);
134130
spin_unlock(&vcpu->async_pf.lock);
135131

136-
if (work->page)
137-
kvm_arch_async_page_ready(vcpu, work);
132+
kvm_arch_async_page_ready(vcpu, work);
138133
kvm_arch_async_page_present(vcpu, work);
139134

140135
list_del(&work->queue);
141136
vcpu->async_pf.queued--;
142-
if (!is_error_page(work->page))
143-
kvm_release_page_clean(work->page);
144137
kmem_cache_free(async_pf_cache, work);
145138
}
146139
}
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
163156
if (!work)
164157
return 0;
165158

166-
work->page = NULL;
159+
work->wakeup_all = false;
167160
work->vcpu = vcpu;
168161
work->gva = gva;
169162
work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
203196
if (!work)
204197
return -ENOMEM;
205198

206-
work->page = KVM_ERR_PTR_BAD_PAGE;
199+
work->wakeup_all = true;
207200
INIT_LIST_HEAD(&work->queue); /* for list_del to work */
208201

209202
spin_lock(&vcpu->async_pf.lock);

0 commit comments

Comments (0)