|
40 | 40 |
|
41 | 41 | #include "trace_hv.h"
|
42 | 42 |
|
/* Define to enable verbose debug output for HPT resize operations */
/* #define DEBUG_RESIZE_HPT 1 */

#ifdef DEBUG_RESIZE_HPT
/*
 * Emit a single KERN_DEBUG line tagged with the resize instance.
 *
 * Using one printk() (rather than one for the tag and one for the body)
 * keeps the message atomic: since v4.9 a second printk without KERN_CONT
 * starts a new default-loglevel line, and concurrent printers could
 * interleave between the two calls.  @fmt must be a string literal so it
 * can be concatenated with the tag.
 */
#define resize_hpt_debug(resize, fmt, ...) \
	printk(KERN_DEBUG "RESIZE HPT %p: " fmt, resize, ##__VA_ARGS__)
#else
#define resize_hpt_debug(resize, ...) \
	do { } while (0)
#endif
| 55 | + |
43 | 56 | static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
|
44 | 57 | long pte_index, unsigned long pteh,
|
45 | 58 | unsigned long ptel, unsigned long *pte_idx_ret);
|
| 59 | + |
/*
 * Tracks an in-progress HPT resize for a VM.  Allocated in
 * kvm_vm_ioctl_resize_hpt_prepare(), referenced by kvm->arch.resize_hpt,
 * and freed by resize_hpt_release().
 */
struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;		/* owning VM */
	struct work_struct work;	/* runs resize_hpt_prepare_work() */
	u32 order;			/* requested HPT order (the ioctl's "shift" value) */

	/* These fields protected by kvm->lock */
	int error;			/* result of the allocate phase (0 on success) */
	bool prepare_done;		/* set once the allocate phase has finished */
};
| 70 | + |
46 | 71 | static void kvmppc_rmap_reset(struct kvm *kvm);
|
47 | 72 |
|
48 | 73 | int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
|
@@ -1179,6 +1204,172 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
|
1179 | 1204 | srcu_read_unlock(&kvm->srcu, srcu_idx);
|
1180 | 1205 | }
|
1181 | 1206 |
|
/*
 * HPT resizing
 */
/*
 * Allocate phase of an HPT resize: runs from resize_hpt_prepare_work().
 * Currently a stub that always reports success; presumably a later
 * change allocates the tentative new HPT here — TODO confirm against
 * follow-up patches.
 */
static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
{
	return 0;
}
| 1214 | + |
/*
 * Rehash phase of an HPT resize.  Currently a stub that always fails
 * with -EIO, so kvm_vm_ioctl_resize_hpt_commit() aborts before pivoting
 * to the new HPT; presumably a later change migrates HPT entries here —
 * TODO confirm against follow-up patches.
 */
static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
{
	return -EIO;
}
| 1219 | + |
/*
 * Pivot phase of an HPT resize: intended to switch the guest over to the
 * new HPT.  Currently an empty stub (and unreachable in practice, since
 * resize_hpt_rehash() always fails).
 */
static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
{
}
| 1223 | + |
| 1224 | +static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
| 1225 | +{ |
| 1226 | + BUG_ON(kvm->arch.resize_hpt != resize); |
| 1227 | + kvm->arch.resize_hpt = NULL; |
| 1228 | + kfree(resize); |
| 1229 | +} |
| 1230 | + |
| 1231 | +static void resize_hpt_prepare_work(struct work_struct *work) |
| 1232 | +{ |
| 1233 | + struct kvm_resize_hpt *resize = container_of(work, |
| 1234 | + struct kvm_resize_hpt, |
| 1235 | + work); |
| 1236 | + struct kvm *kvm = resize->kvm; |
| 1237 | + int err; |
| 1238 | + |
| 1239 | + resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", |
| 1240 | + resize->order); |
| 1241 | + |
| 1242 | + err = resize_hpt_allocate(resize); |
| 1243 | + |
| 1244 | + mutex_lock(&kvm->lock); |
| 1245 | + |
| 1246 | + resize->error = err; |
| 1247 | + resize->prepare_done = true; |
| 1248 | + |
| 1249 | + mutex_unlock(&kvm->lock); |
| 1250 | +} |
| 1251 | + |
| 1252 | +long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, |
| 1253 | + struct kvm_ppc_resize_hpt *rhpt) |
| 1254 | +{ |
| 1255 | + unsigned long flags = rhpt->flags; |
| 1256 | + unsigned long shift = rhpt->shift; |
| 1257 | + struct kvm_resize_hpt *resize; |
| 1258 | + int ret; |
| 1259 | + |
| 1260 | + if (flags != 0) |
| 1261 | + return -EINVAL; |
| 1262 | + |
| 1263 | + if (shift && ((shift < 18) || (shift > 46))) |
| 1264 | + return -EINVAL; |
| 1265 | + |
| 1266 | + mutex_lock(&kvm->lock); |
| 1267 | + |
| 1268 | + resize = kvm->arch.resize_hpt; |
| 1269 | + |
| 1270 | + if (resize) { |
| 1271 | + if (resize->order == shift) { |
| 1272 | + /* Suitable resize in progress */ |
| 1273 | + if (resize->prepare_done) { |
| 1274 | + ret = resize->error; |
| 1275 | + if (ret != 0) |
| 1276 | + resize_hpt_release(kvm, resize); |
| 1277 | + } else { |
| 1278 | + ret = 100; /* estimated time in ms */ |
| 1279 | + } |
| 1280 | + |
| 1281 | + goto out; |
| 1282 | + } |
| 1283 | + |
| 1284 | + /* not suitable, cancel it */ |
| 1285 | + resize_hpt_release(kvm, resize); |
| 1286 | + } |
| 1287 | + |
| 1288 | + ret = 0; |
| 1289 | + if (!shift) |
| 1290 | + goto out; /* nothing to do */ |
| 1291 | + |
| 1292 | + /* start new resize */ |
| 1293 | + |
| 1294 | + resize = kzalloc(sizeof(*resize), GFP_KERNEL); |
| 1295 | + resize->order = shift; |
| 1296 | + resize->kvm = kvm; |
| 1297 | + INIT_WORK(&resize->work, resize_hpt_prepare_work); |
| 1298 | + kvm->arch.resize_hpt = resize; |
| 1299 | + |
| 1300 | + schedule_work(&resize->work); |
| 1301 | + |
| 1302 | + ret = 100; /* estimated time in ms */ |
| 1303 | + |
| 1304 | +out: |
| 1305 | + mutex_unlock(&kvm->lock); |
| 1306 | + return ret; |
| 1307 | +} |
| 1308 | + |
/*
 * IPI callback used via on_each_cpu() from kvm_vm_ioctl_resize_hpt_commit().
 * Intentionally empty: the interrupt itself is the point — it kicks any
 * CPU out of guest mode so it re-reads hpte_setup_done on re-entry.
 */
static void resize_hpt_boot_vcpu(void *opaque)
{
	/* Nothing to do, just force a KVM exit */
}
| 1313 | + |
/*
 * KVM_PPC_RESIZE_HPT_COMMIT ioctl: complete a prepared HPT resize by
 * rehashing into the new HPT and pivoting the guest onto it.
 *
 * Whatever the outcome, the pending resize (if any) is released before
 * returning, so a failed commit requires a fresh PREPARE.
 *
 * Returns 0 on success; -EINVAL for bad flags/shift; -EIO if the HPT was
 * never set up (or rehash failed); -ENXIO if no matching resize is
 * pending; -EBUSY if the prepare phase has not finished; or the error
 * recorded by the prepare phase.
 */
long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
				    struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	long ret;

	if (flags != 0)
		return -EINVAL;

	/* Same HPT order bounds as the PREPARE ioctl */
	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	resize = kvm->arch.resize_hpt;

	/* This shouldn't be possible */
	ret = -EIO;
	if (WARN_ON(!kvm->arch.hpte_setup_done))
		goto out_no_hpt;

	/* Stop VCPUs from running while we mess with the HPT */
	kvm->arch.hpte_setup_done = 0;
	/* Order the flag clear before the IPIs below — TODO confirm the
	 * matching read-side barrier in the VCPU entry path */
	smp_mb();

	/* Boot all CPUs out of the guest so they re-read
	 * hpte_setup_done */
	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);

	/* No resize pending, or it was prepared for a different order */
	ret = -ENXIO;
	if (!resize || (resize->order != shift))
		goto out;

	/* Prepare work item (resize_hpt_prepare_work) still running */
	ret = -EBUSY;
	if (!resize->prepare_done)
		goto out;

	/* Propagate any failure recorded by the allocate phase */
	ret = resize->error;
	if (ret != 0)
		goto out;

	ret = resize_hpt_rehash(resize);
	if (ret != 0)
		goto out;

	/* Point of no return: switch the guest onto the new HPT */
	resize_hpt_pivot(resize);

out:
	/* Let VCPUs run again */
	kvm->arch.hpte_setup_done = 1;
	smp_mb();
out_no_hpt:
	resize_hpt_release(kvm, resize);
	mutex_unlock(&kvm->lock);
	return ret;
}
| 1372 | + |
1182 | 1373 | /*
|
1183 | 1374 | * Functions for reading and writing the hash table via reads and
|
1184 | 1375 | * writes on a file descriptor.
|
|
0 commit comments