@@ -54,6 +54,70 @@ static void *real_vmalloc_addr(void *x)
 	return __va(addr);
 }
 
+/*
+ * Add this HPTE into the chain for the real page.
+ * Must be called with the chain locked; it unlocks the chain.
+ */
+static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+			unsigned long *rmap, long pte_index, int realmode)
+{
+	struct revmap_entry *head, *tail;
+	unsigned long i;
+
+	if (*rmap & KVMPPC_RMAP_PRESENT) {
+		i = *rmap & KVMPPC_RMAP_INDEX;
+		head = &kvm->arch.revmap[i];
+		if (realmode)
+			head = real_vmalloc_addr(head);
+		tail = &kvm->arch.revmap[head->back];
+		if (realmode)
+			tail = real_vmalloc_addr(tail);
+		rev->forw = i;
+		rev->back = head->back;
+		tail->forw = pte_index;
+		head->back = pte_index;
+	} else {
+		rev->forw = rev->back = pte_index;
+		i = pte_index;
+	}
+	smp_wmb();
+	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT;	/* unlock */
+}
+
+/* Remove this HPTE from the chain for a real page */
+static void remove_revmap_chain(struct kvm *kvm, long pte_index,
+			unsigned long hpte_v)
+{
+	struct revmap_entry *rev, *next, *prev;
+	unsigned long gfn, ptel, head;
+	struct kvm_memory_slot *memslot;
+	unsigned long *rmap;
+
+	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	ptel = rev->guest_rpte;
+	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
+	memslot = builtin_gfn_to_memslot(kvm, gfn);
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+		return;
+
+	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+	lock_rmap(rmap);
+
+	head = *rmap & KVMPPC_RMAP_INDEX;
+	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
+	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+	next->back = rev->back;
+	prev->forw = rev->forw;
+	if (head == pte_index) {
+		head = rev->forw;
+		if (head == pte_index)
+			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
+		else
+			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
+	}
+	unlock_rmap(rmap);
+}
+
 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		    long pte_index, unsigned long pteh, unsigned long ptel)
 {
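An aside on the data structure: the revmap chain is a circular doubly linked list whose links (forw/back) are HPTE indices rather than pointers, so the same chain can be walked from the kernel's vmalloc mapping or from real mode via real_vmalloc_addr(). The standalone sketch below mirrors the insert/unlink logic above over a plain array; the types and names are illustrative only, not the kernel's.

/*
 * Index-linked circular list, as used by the revmap chains above.
 * Compile with any C99 compiler; everything here is illustrative.
 */
#include <stdio.h>

#define NENT 8

struct entry {
	unsigned long forw;	/* index of next entry in the chain */
	unsigned long back;	/* index of previous entry */
};

static struct entry map[NENT];

/* Insert entry 'i' at the tail of the chain whose head is 'head'. */
static void chain_add(unsigned long head, unsigned long i)
{
	unsigned long tail = map[head].back;

	map[i].forw = head;
	map[i].back = tail;
	map[tail].forw = i;
	map[head].back = i;
}

/* Unlink entry 'i'; returns the new head (still 'i' if the chain is now empty). */
static unsigned long chain_del(unsigned long head, unsigned long i)
{
	map[map[i].forw].back = map[i].back;
	map[map[i].back].forw = map[i].forw;
	if (head == i)
		head = map[i].forw;
	return head;
}

int main(void)
{
	unsigned long head = 3;

	map[3].forw = map[3].back = 3;	/* singleton chain, like the else branch */
	chain_add(head, 5);
	chain_add(head, 6);
	for (unsigned long i = head; ; i = map[i].forw) {
		printf("%lu ", i);
		if (map[i].forw == head)
			break;
	}
	printf("\n");			/* prints: 3 5 6 */
	head = chain_del(head, 3);
	printf("new head: %lu\n", head);	/* prints: new head: 5 */
	return 0;
}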
@@ -66,6 +130,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	struct kvm_memory_slot *memslot;
 	unsigned long *physp, pte_size;
 	unsigned long is_io;
+	unsigned long *rmap;
 	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
 
 	psize = hpte_page_size(pteh, ptel);
@@ -83,6 +148,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (!slot_is_aligned(memslot, psize))
 		return H_PARAMETER;
 	slot_fn = gfn - memslot->base_gfn;
+	rmap = &memslot->rmap[slot_fn];
 
 	physp = kvm->arch.slot_phys[memslot->id];
 	if (!physp)
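Note that the rmap word operated on by lock_rmap()/unlock_rmap() packs everything into a single unsigned long: a lock bit, a present bit, a referenced bit, and the index of the chain-head HPTE. That is why the final store in kvmppc_add_revmap_chain() can publish the new head and release the lock in one write. A C11 sketch of that unlock-by-store follows; the bit positions are assumptions for illustration, not the actual KVMPPC_RMAP_* values.

/* Illustrative one-word rmap encoding; bit positions are made up. */
#include <stdatomic.h>

#define RMAP_LOCK_BIT	(1UL << 63)	/* hypothetical lock bit */
#define RMAP_PRESENT	(1UL << 62)	/* chain is non-empty */
#define RMAP_REFERENCED	(1UL << 61)	/* page accessed since last scan */
#define RMAP_INDEX	0xffffffffUL	/* index of the chain-head HPTE */

/* Publish a new chain head and release the lock in a single store. */
static void rmap_set_head_and_unlock(_Atomic unsigned long *rmap,
				     unsigned long head)
{
	/* release ordering stands in for the smp_wmb() in the patch:
	 * the chain updates become visible before the lock is seen clear */
	atomic_store_explicit(rmap,
			      head | RMAP_REFERENCED | RMAP_PRESENT,
			      memory_order_release);
}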
@@ -164,13 +230,25 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	}
 
 	/* Save away the guest's idea of the second HPTE dword */
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	rev = &kvm->arch.revmap[pte_index];
+	if (realmode)
+		rev = real_vmalloc_addr(rev);
 	if (rev)
 		rev->guest_rpte = g_ptel;
+
+	/* Link HPTE into reverse-map chain */
+	if (realmode)
+		rmap = real_vmalloc_addr(rmap);
+	lock_rmap(rmap);
+	kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);
+
 	hpte[1] = ptel;
+
+	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
 	eieio();
 	hpte[0] = pteh;
 	asm volatile("ptesync" : : : "memory");
+
 	vcpu->arch.gpr[4] = pte_index;
 	return H_SUCCESS;
 }
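The comment added before eieio() states the key ordering rule in H_ENTER: the second HPTE dword and the revmap link must be globally visible before the store to hpte[0] makes the entry valid and drops HPTE_V_HVLOCK. In generic C11 terms this is release-store publication; a minimal analogue, using portable atomics rather than the kernel's barriers:

/* Generic analogue of the eieio() + hpte[0] store publication above. */
#include <stdatomic.h>
#include <stdint.h>

struct hpte_like {
	_Atomic uint64_t v;	/* "valid/locked" word, like hpte[0] */
	uint64_t r;		/* payload word, like hpte[1] */
};

static void publish(struct hpte_like *h, uint64_t pteh, uint64_t ptel)
{
	h->r = ptel;		/* fill in the payload first */
	/* release ordering plays the role of eieio(): any observer that
	 * sees the valid word also sees the payload */
	atomic_store_explicit(&h->v, pteh, memory_order_release);
}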
@@ -220,6 +298,8 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
 	vcpu->arch.gpr[5] = r = hpte[1];
 	rb = compute_tlbie_rb(v, r, pte_index);
+	remove_revmap_chain(kvm, pte_index, v);
+	smp_wmb();
 	hpte[0] = 0;
 	if (!(flags & H_LOCAL)) {
 		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
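In the remove paths the ordering runs the other way: remove_revmap_chain() must complete, and the smp_wmb() must make the unlink visible, before hpte[0] = 0 frees the slot for reuse. That way a concurrent H_ENTER that observes the free slot also observes the unlink and never chains a new entry onto stale links. A rough C11 release/acquire analogue of the pairing (the kernel itself uses smp_wmb() with a matching read-side barrier):

/* Sketch of the barrier pairing around freeing an HPTE slot. */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t hpte_v;		/* stands in for hpte[0] */

static void writer_free_slot(void)
{
	/* ... remove_revmap_chain() work happens here ... */
	/* release stands in for smp_wmb(): unlink visible before "free" */
	atomic_store_explicit(&hpte_v, 0, memory_order_release);
}

static int reader_slot_is_free(void)
{
	/* acquire pairs with the release: seeing 0 implies seeing the unlink */
	return atomic_load_explicit(&hpte_v, memory_order_acquire) == 0;
}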
@@ -293,6 +373,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			flags |= (hp[1] >> 5) & 0x0c;
 			args[i * 2] = ((0x80 | flags) << 56) + pte_index;
 			tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+			remove_revmap_chain(kvm, pte_index, hp[0]);
+			smp_wmb();
 			hp[0] = 0;
 		}
 		if (n_inval == 0)