Skip to content

Commit c35635e

Browse files
paulusmack authored and agraf committed
KVM: PPC: Book3S HV: Report VPA and DTL modifications in dirty map
At present, the KVM_GET_DIRTY_LOG ioctl doesn't report modifications done by the host to the virtual processor areas (VPAs) and dispatch trace logs (DTLs) registered by the guest. This is because those modifications are done either in real mode or in the host kernel context, and in neither case does the access go through the guest's HPT, and thus no change (C) bit gets set in the guest's HPT. However, the changes done by the host do need to be tracked so that the modified pages get transferred when doing live migration. In order to track these modifications, this adds a dirty flag to the struct representing the VPA/DTL areas, and arranges to set the flag when the VPA/DTL gets modified by the host. Then, when we are collecting the dirty log, we also check the dirty flags for the VPA and DTL for each vcpu and set the relevant bit in the dirty log if necessary. Doing this also means we now need to keep track of the guest physical address of the VPA/DTL areas. So as not to lose track of modifications to a VPA/DTL area when it gets unregistered, or when a new area gets registered in its place, we need to transfer the dirty state to the rmap chain. This adds code to kvmppc_unpin_guest_page() to do that if the area was dirty. To simplify that code, we now require that all VPA, DTL and SLB shadow buffer areas fit within a single host page. Guests already comply with this requirement because pHyp requires that these areas not cross a 4k boundary. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de>
1 parent a1b4a0f commit c35635e

File tree

6 files changed

+80
-21
lines changed

6 files changed

+80
-21
lines changed

arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,8 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
156156
unsigned long pte_index);
157157
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
158158
unsigned long *nb_ret);
159-
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
159+
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
160+
unsigned long gpa, bool dirty);
160161
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
161162
long pte_index, unsigned long pteh, unsigned long ptel);
162163
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,

arch/powerpc/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -301,11 +301,13 @@ struct kvmppc_vcore {
301301
* that a guest can register.
302302
*/
303303
struct kvmppc_vpa {
304+
unsigned long gpa; /* Current guest phys addr */
304305
void *pinned_addr; /* Address in kernel linear mapping */
305306
void *pinned_end; /* End of region */
306307
unsigned long next_gpa; /* Guest phys addr for update */
307308
unsigned long len; /* Number of bytes required */
308309
u8 update_pending; /* 1 => update pinned_addr from next_gpa */
310+
bool dirty; /* true => area has been modified by kernel */
309311
};
310312

311313
struct kvmppc_pte {

arch/powerpc/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -477,6 +477,7 @@ int main(void)
477477
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
478478
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
479479
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
480+
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
480481
#endif
481482
#ifdef CONFIG_PPC_BOOK3S
482483
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 52 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1099,11 +1099,30 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
10991099
return ret;
11001100
}
11011101

1102+
static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
1103+
struct kvm_memory_slot *memslot,
1104+
unsigned long *map)
1105+
{
1106+
unsigned long gfn;
1107+
1108+
if (!vpa->dirty || !vpa->pinned_addr)
1109+
return;
1110+
gfn = vpa->gpa >> PAGE_SHIFT;
1111+
if (gfn < memslot->base_gfn ||
1112+
gfn >= memslot->base_gfn + memslot->npages)
1113+
return;
1114+
1115+
vpa->dirty = false;
1116+
if (map)
1117+
__set_bit_le(gfn - memslot->base_gfn, map);
1118+
}
1119+
11021120
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
11031121
unsigned long *map)
11041122
{
11051123
unsigned long i;
11061124
unsigned long *rmapp;
1125+
struct kvm_vcpu *vcpu;
11071126

11081127
preempt_disable();
11091128
rmapp = memslot->arch.rmap;
@@ -1112,6 +1131,15 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
11121131
__set_bit_le(i, map);
11131132
++rmapp;
11141133
}
1134+
1135+
/* Harvest dirty bits from VPA and DTL updates */
1136+
/* Note: we never modify the SLB shadow buffer areas */
1137+
kvm_for_each_vcpu(i, vcpu, kvm) {
1138+
spin_lock(&vcpu->arch.vpa_update_lock);
1139+
harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
1140+
harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
1141+
spin_unlock(&vcpu->arch.vpa_update_lock);
1142+
}
11151143
preempt_enable();
11161144
return 0;
11171145
}
@@ -1123,7 +1151,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
11231151
unsigned long gfn = gpa >> PAGE_SHIFT;
11241152
struct page *page, *pages[1];
11251153
int npages;
1126-
unsigned long hva, psize, offset;
1154+
unsigned long hva, offset;
11271155
unsigned long pa;
11281156
unsigned long *physp;
11291157
int srcu_idx;
@@ -1155,26 +1183,41 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
11551183
}
11561184
srcu_read_unlock(&kvm->srcu, srcu_idx);
11571185

1158-
psize = PAGE_SIZE;
1159-
if (PageHuge(page)) {
1160-
page = compound_head(page);
1161-
psize <<= compound_order(page);
1162-
}
1163-
offset = gpa & (psize - 1);
1186+
offset = gpa & (PAGE_SIZE - 1);
11641187
if (nb_ret)
1165-
*nb_ret = psize - offset;
1188+
*nb_ret = PAGE_SIZE - offset;
11661189
return page_address(page) + offset;
11671190

11681191
err:
11691192
srcu_read_unlock(&kvm->srcu, srcu_idx);
11701193
return NULL;
11711194
}
11721195

1173-
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
1196+
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
1197+
bool dirty)
11741198
{
11751199
struct page *page = virt_to_page(va);
1200+
struct kvm_memory_slot *memslot;
1201+
unsigned long gfn;
1202+
unsigned long *rmap;
1203+
int srcu_idx;
11761204

11771205
put_page(page);
1206+
1207+
if (!dirty || !kvm->arch.using_mmu_notifiers)
1208+
return;
1209+
1210+
/* We need to mark this page dirty in the rmap chain */
1211+
gfn = gpa >> PAGE_SHIFT;
1212+
srcu_idx = srcu_read_lock(&kvm->srcu);
1213+
memslot = gfn_to_memslot(kvm, gfn);
1214+
if (memslot) {
1215+
rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
1216+
lock_rmap(rmap);
1217+
*rmap |= KVMPPC_RMAP_CHANGED;
1218+
unlock_rmap(rmap);
1219+
}
1220+
srcu_read_unlock(&kvm->srcu, srcu_idx);
11781221
}
11791222

11801223
/*

arch/powerpc/kvm/book3s_hv.c

Lines changed: 19 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
259259
len = ((struct reg_vpa *)va)->length.hword;
260260
else
261261
len = ((struct reg_vpa *)va)->length.word;
262-
kvmppc_unpin_guest_page(kvm, va);
262+
kvmppc_unpin_guest_page(kvm, va, vpa, false);
263263

264264
/* Check length */
265265
if (len > nb || len < sizeof(struct reg_vpa))
@@ -359,13 +359,13 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
359359
va = NULL;
360360
nb = 0;
361361
if (gpa)
362-
va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
362+
va = kvmppc_pin_guest_page(kvm, gpa, &nb);
363363
spin_lock(&vcpu->arch.vpa_update_lock);
364364
if (gpa == vpap->next_gpa)
365365
break;
366366
/* sigh... unpin that one and try again */
367367
if (va)
368-
kvmppc_unpin_guest_page(kvm, va);
368+
kvmppc_unpin_guest_page(kvm, va, gpa, false);
369369
}
370370

371371
vpap->update_pending = 0;
@@ -375,12 +375,15 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
375375
* has changed the mappings underlying guest memory,
376376
* so unregister the region.
377377
*/
378-
kvmppc_unpin_guest_page(kvm, va);
378+
kvmppc_unpin_guest_page(kvm, va, gpa, false);
379379
va = NULL;
380380
}
381381
if (vpap->pinned_addr)
382-
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
382+
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
383+
vpap->dirty);
384+
vpap->gpa = gpa;
383385
vpap->pinned_addr = va;
386+
vpap->dirty = false;
384387
if (va)
385388
vpap->pinned_end = va + vpap->len;
386389
}
@@ -472,6 +475,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
472475
/* order writing *dt vs. writing vpa->dtl_idx */
473476
smp_wmb();
474477
vpa->dtl_idx = ++vcpu->arch.dtl_index;
478+
vcpu->arch.dtl.dirty = true;
475479
}
476480

477481
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -913,15 +917,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
913917
return ERR_PTR(err);
914918
}
915919

920+
static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
921+
{
922+
if (vpa->pinned_addr)
923+
kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
924+
vpa->dirty);
925+
}
926+
916927
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
917928
{
918929
spin_lock(&vcpu->arch.vpa_update_lock);
919-
if (vcpu->arch.dtl.pinned_addr)
920-
kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
921-
if (vcpu->arch.slb_shadow.pinned_addr)
922-
kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
923-
if (vcpu->arch.vpa.pinned_addr)
924-
kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
930+
unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
931+
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
932+
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
925933
spin_unlock(&vcpu->arch.vpa_update_lock);
926934
kvm_vcpu_uninit(vcpu);
927935
kmem_cache_free(kvm_vcpu_cache, vcpu);

arch/powerpc/kvm/book3s_hv_rmhandlers.S

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -260,6 +260,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
260260
lwz r5, LPPACA_YIELDCOUNT(r3)
261261
addi r5, r5, 1
262262
stw r5, LPPACA_YIELDCOUNT(r3)
263+
li r6, 1
264+
stb r6, VCPU_VPA_DIRTY(r4)
263265
25:
264266
/* Load up DAR and DSISR */
265267
ld r5, VCPU_DAR(r4)
@@ -1018,6 +1020,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
10181020
lwz r3, LPPACA_YIELDCOUNT(r8)
10191021
addi r3, r3, 1
10201022
stw r3, LPPACA_YIELDCOUNT(r8)
1023+
li r3, 1
1024+
stb r3, VCPU_VPA_DIRTY(r9)
10211025
25:
10221026
/* Save PMU registers if requested */
10231027
/* r8 and cr0.eq are live here */

0 commit comments

Comments
 (0)