
Commit 67f6919

AKASHI Takahiro authored and wildea01 committed
arm64: kvm: allows kvm cpu hotplug
The current kvm implementation on arm64 does cpu-specific initialization at
system boot, and has no way to gracefully shut down a core in terms of kvm.
This prevents kexec from rebooting the system at EL2.

This patch adds a cpu tear-down function and also puts the existing cpu-init
code into a separate function, kvm_arch_hardware_disable() and
kvm_arch_hardware_enable() respectively. We no longer need the arm64-specific
cpu hotplug hook.

Since this patch modifies common code between arm and arm64, one stub
definition, __cpu_reset_hyp_mode(), is added on the arm side to avoid
compilation errors.

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
[Rebase, added separate VHE init/exit path, changed reset's use of
 kvm_call_hyp() to the __ version, enabled/disabled hardware in
 init_subsystems(), added icache maintenance to __kvm_hyp_reset() and removed
 lr restore, removed guest-enter after teardown handling]
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent c94b0cf commit 67f6919
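
To make the new flow easier to follow, here is a minimal user-space sketch of the state machine this patch introduces. It is not kernel code: NR_CPUS, the bool array and the printf stubs are invented stand-ins for the kernel's per-CPU variable and the real EL2 init/reset paths. It illustrates that kvm_arch_hardware_enable()/kvm_arch_hardware_disable() become idempotent through a per-CPU flag, while the CPU PM enter/exit path deliberately leaves that flag untouched so resume restores whatever state was in effect before suspend.

/* Simplified model (not kernel code) of the per-CPU enabled flag. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4	/* arbitrary for the example */

static bool kvm_arm_hardware_enabled[NR_CPUS];

/* Stand-ins for cpu_hyp_reinit()/cpu_hyp_reset() in the patch. */
static void cpu_hyp_reinit(int cpu) { printf("cpu%d: init EL2\n", cpu); }
static void cpu_hyp_reset(int cpu)  { printf("cpu%d: reset EL2 to stub\n", cpu); }

/* Models kvm_arch_hardware_enable(): only acts if not already enabled. */
static void hardware_enable(int cpu)
{
	if (!kvm_arm_hardware_enabled[cpu]) {
		cpu_hyp_reinit(cpu);
		kvm_arm_hardware_enabled[cpu] = true;
	}
}

/* Models kvm_arch_hardware_disable(): tears EL2 down only if enabled. */
static void hardware_disable(int cpu)
{
	if (kvm_arm_hardware_enabled[cpu]) {
		cpu_hyp_reset(cpu);
		kvm_arm_hardware_enabled[cpu] = false;
	}
}

/* Models CPU_PM_ENTER: reset EL2 but keep the flag set for resume. */
static void pm_enter(int cpu)
{
	if (kvm_arm_hardware_enabled[cpu])
		cpu_hyp_reset(cpu);
}

/* Models CPU_PM_EXIT: re-init EL2 only if it was enabled before suspend. */
static void pm_exit(int cpu)
{
	if (kvm_arm_hardware_enabled[cpu])
		cpu_hyp_reinit(cpu);
}

int main(void)
{
	hardware_enable(0);	/* first enable: init EL2 */
	hardware_enable(0);	/* second enable: no-op */
	pm_enter(0);		/* suspend: EL2 reset, flag kept */
	pm_exit(0);		/* resume: EL2 re-initialised */
	hardware_disable(0);	/* hotplug/kexec path: back to stub vectors */
	return 0;
}

In the patch itself the flag is a DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled) variable, the enable/disable helpers are run on every CPU from init_subsystems(), and cpu_hyp_reset() hands the CPU back to the hyp stub vectors via __kvm_hyp_reset so that kexec can later boot a new kernel at EL2.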

File tree

9 files changed: +152 -50 lines changed


arch/arm/include/asm/kvm_host.h

Lines changed: 9 additions & 1 deletion
@@ -265,6 +265,15 @@ static inline void __cpu_init_stage2(void)
 	kvm_call_hyp(__init_stage2_translation);
 }
 
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start)
+{
+	/*
+	 * TODO
+	 * kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
+	 */
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	return 0;
@@ -277,7 +286,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

arch/arm/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
@@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);

arch/arm/kvm/arm.c

Lines changed: 71 additions & 48 deletions
@@ -16,7 +16,6 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
-#include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -66,6 +65,8 @@ static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
+static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
+
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
@@ -90,11 +91,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
 	return &kvm_arm_running_vcpu;
 }
 
-int kvm_arch_hardware_enable(void)
-{
-	return 0;
-}
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -1033,11 +1029,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
-static void cpu_init_stage2(void *dummy)
-{
-	__cpu_init_stage2();
-}
-
 static void cpu_init_hyp_mode(void *dummy)
 {
 	phys_addr_t boot_pgd_ptr;
@@ -1065,43 +1056,87 @@ static void cpu_hyp_reinit(void)
 {
 	if (is_kernel_in_hyp_mode()) {
 		/*
-		 * cpu_init_stage2() is safe to call even if the PM
+		 * __cpu_init_stage2() is safe to call even if the PM
 		 * event was cancelled before the CPU was reset.
 		 */
-		cpu_init_stage2(NULL);
+		__cpu_init_stage2();
 	} else {
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
 	}
 }
 
-static int hyp_init_cpu_notify(struct notifier_block *self,
-			       unsigned long action, void *cpu)
+static void cpu_hyp_reset(void)
+{
+	phys_addr_t boot_pgd_ptr;
+	phys_addr_t phys_idmap_start;
+
+	if (!is_kernel_in_hyp_mode()) {
+		boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+		phys_idmap_start = kvm_get_idmap_start();
+
+		__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
+	}
+}
+
+static void _kvm_arch_hardware_enable(void *discard)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
+	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
 		cpu_hyp_reinit();
+		__this_cpu_write(kvm_arm_hardware_enabled, 1);
 	}
+}
+
+int kvm_arch_hardware_enable(void)
+{
+	_kvm_arch_hardware_enable(NULL);
+	return 0;
+}
 
-	return NOTIFY_OK;
+static void _kvm_arch_hardware_disable(void *discard)
+{
+	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
+		cpu_hyp_reset();
+		__this_cpu_write(kvm_arm_hardware_enabled, 0);
+	}
 }
 
-static struct notifier_block hyp_init_cpu_nb = {
-	.notifier_call = hyp_init_cpu_notify,
-};
+void kvm_arch_hardware_disable(void)
+{
+	_kvm_arch_hardware_disable(NULL);
+}
 
 #ifdef CONFIG_CPU_PM
 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    unsigned long cmd,
 				    void *v)
 {
-	if (cmd == CPU_PM_EXIT) {
-		cpu_hyp_reinit();
+	/*
+	 * kvm_arm_hardware_enabled is left with its old value over
+	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
+	 * re-enable hyp.
+	 */
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		if (__this_cpu_read(kvm_arm_hardware_enabled))
+			/*
+			 * don't update kvm_arm_hardware_enabled here
+			 * so that the hardware will be re-enabled
+			 * when we resume. See below.
+			 */
+			cpu_hyp_reset();
+
+		return NOTIFY_OK;
+	case CPU_PM_EXIT:
+		if (__this_cpu_read(kvm_arm_hardware_enabled))
+			/* The hardware was enabled before suspend. */
+			cpu_hyp_reinit();
 
 		return NOTIFY_OK;
-	}
 
-	return NOTIFY_DONE;
+	default:
+		return NOTIFY_DONE;
+	}
 }
 
 static struct notifier_block hyp_init_cpu_pm_nb = {
@@ -1136,18 +1171,12 @@ static int init_common_resources(void)
 
 static int init_subsystems(void)
 {
-	int err;
+	int err = 0;
 
 	/*
-	 * Register CPU Hotplug notifier
+	 * Enable hardware so that subsystem initialisation can access EL2.
 	 */
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	cpu_notifier_register_done();
-	if (err) {
-		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
-		return err;
-	}
+	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
 
 	/*
 	 * Register CPU lower-power notifier
@@ -1165,22 +1194,26 @@ static int init_subsystems(void)
 	case -ENODEV:
 	case -ENXIO:
 		vgic_present = false;
+		err = 0;
 		break;
 	default:
-		return err;
+		goto out;
 	}
 
 	/*
 	 * Init HYP architected timer support
 	 */
 	err = kvm_timer_hyp_init();
 	if (err)
-		return err;
+		goto out;
 
 	kvm_perf_init();
 	kvm_coproc_table_init();
 
-	return 0;
+out:
+	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
+
+	return err;
 }
 
 static void teardown_hyp_mode(void)
@@ -1197,11 +1230,6 @@ static void teardown_hyp_mode(void)
 
 static int init_vhe_mode(void)
 {
-	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_stage2, NULL, 1);
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1288,11 +1316,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
-
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
 #endif

arch/arm/kvm/mmu.c

Lines changed: 5 additions & 0 deletions
@@ -1666,6 +1666,11 @@ phys_addr_t kvm_get_idmap_vector(void)
 	return hyp_idmap_vector;
 }
 
+phys_addr_t kvm_get_idmap_start(void)
+{
+	return hyp_idmap_start;
+}
+
 int kvm_mmu_init(void)
 {
 	int err;

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
@@ -42,6 +42,7 @@ struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_vector[];

arch/arm64/include/asm/kvm_host.h

Lines changed: 12 additions & 1 deletion
@@ -46,6 +46,7 @@
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
+phys_addr_t kvm_hyp_reset_entry(void);
 
 struct kvm_arch {
 	/* The VMID generation used for the virt. memory system */
@@ -352,7 +353,17 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
 		       hyp_stack_ptr, vector_ptr);
 }
 
-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start)
+{
+	/*
+	 * Call reset code, and switch back to stub hyp vectors.
+	 * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+	 */
+	__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+		       boot_pgd_ptr, phys_idmap_start);
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
@@ -109,6 +109,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);

arch/arm64/kvm/hyp-init.S

Lines changed: 38 additions & 0 deletions
@@ -139,6 +139,44 @@ merged:
 	eret
 ENDPROC(__kvm_hyp_init)
 
+	/*
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
+	ic	iallu
+	tlbi	alle2
+	dsb	sy
+	isb
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1		// Clear SCTL_M and etc
+	msr	sctlr_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Install stub vectors */
+	adr_l	x0, __hyp_stub_vectors
+	msr	vbar_el2, x0
+
+	eret
+ENDPROC(__kvm_hyp_reset)
+
 	.ltorg
 
 	.popsection

arch/arm64/kvm/reset.c

Lines changed: 14 additions & 0 deletions
@@ -29,7 +29,9 @@
 #include <asm/cputype.h>
 #include <asm/ptrace.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 
 /*
  * ARMv8 Reset Values
@@ -130,3 +132,15 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
+
+extern char __hyp_idmap_text_start[];
+
+phys_addr_t kvm_hyp_reset_entry(void)
+{
+	unsigned long offset;
+
+	offset = (unsigned long)__kvm_hyp_reset
+		 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+	return TRAMPOLINE_VA + offset;
+}
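
As a rough worked example of the arithmetic in kvm_hyp_reset_entry() above — every address and constant below is invented for illustration, and PAGE_MASK/TRAMPOLINE_VA merely stand in for the kernel's definitions — the function returns the location of __kvm_hyp_reset as seen through the HYP trampoline mapping, which stays usable while the reset code switches back to the boot page tables:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the kernel's constants (4 KiB pages assumed). */
#define PAGE_MASK	(~0xfffUL)
#define TRAMPOLINE_VA	0xffffffbffff00000UL

int main(void)
{
	/* Hypothetical link addresses of the two symbols. */
	uint64_t hyp_idmap_text_start = 0xffffffc000094800UL;
	uint64_t kvm_hyp_reset        = 0xffffffc000094a40UL;

	/* Same computation as kvm_hyp_reset_entry(). */
	uint64_t offset = kvm_hyp_reset - (hyp_idmap_text_start & PAGE_MASK);

	printf("offset = %#llx\n", (unsigned long long)offset);	/* 0xa40 */
	printf("entry  = %#llx\n", (unsigned long long)(TRAMPOLINE_VA + offset));
	return 0;
}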
