Skip to content

Commit adc9b2d

Browse files
James Morse authored and wildea01 committed
arm64: kernel: Rework finisher callback out of __cpu_suspend_enter()
Hibernate could make use of the cpu_suspend() code to save/restore cpu state, however it needs to be able to return '0' from the 'finisher'. Rework cpu_suspend() so that the finisher is called from C code, independently from the save/restore of cpu state. Space to save the context in is allocated in the caller's stack frame, and passed into __cpu_suspend_enter(). Hibernate's use of this API will look like a copy of the cpu_suspend() function. Signed-off-by: James Morse <james.morse@arm.com> Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 67f6919 commit adc9b2d

File tree

4 files changed

+97
-90
lines changed

4 files changed

+97
-90
lines changed

arch/arm64/include/asm/suspend.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#define __ASM_SUSPEND_H
33

44
#define NR_CTX_REGS 11
5+
#define NR_CALLEE_SAVED_REGS 12
56

67
/*
78
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -21,6 +22,25 @@ struct sleep_save_sp {
2122
phys_addr_t save_ptr_stash_phys;
2223
};
2324

25+
/*
26+
* Memory to save the cpu state is allocated on the stack by
27+
* __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
28+
* This data must survive until cpu_resume() is called.
29+
*
30+
* This struct describes the size and the layout of the saved cpu state.
31+
* The layout of the callee_saved_regs is defined by the implementation
32+
* of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
33+
* in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
34+
* returns, and the data would be subsequently corrupted by the call to the
35+
* finisher.
36+
*/
37+
struct sleep_stack_data {
38+
struct cpu_suspend_ctx system_regs;
39+
unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
40+
};
41+
2442
extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
2543
extern void cpu_resume(void);
44+
int __cpu_suspend_enter(struct sleep_stack_data *state);
45+
void __cpu_suspend_exit(void);
2646
#endif

arch/arm64/kernel/asm-offsets.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,8 @@ int main(void)
122122
DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
123123
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
124124
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
125+
DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
126+
DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
125127
#endif
126128
DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
127129
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));

arch/arm64/kernel/sleep.S

Lines changed: 33 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -49,37 +49,30 @@
4949
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
5050
.endm
5151
/*
52-
* Save CPU state for a suspend and execute the suspend finisher.
53-
* On success it will return 0 through cpu_resume - ie through a CPU
54-
* soft/hard reboot from the reset vector.
55-
* On failure it returns the suspend finisher return value or force
56-
* -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
57-
* is not allowed to return, if it does this must be considered failure).
58-
* It saves callee registers, and allocates space on the kernel stack
59-
* to save the CPU specific registers + some other data for resume.
52+
* Save CPU state in the provided sleep_stack_data area, and publish its
53+
* location for cpu_resume()'s use in sleep_save_stash.
6054
*
61-
* x0 = suspend finisher argument
62-
* x1 = suspend finisher function pointer
55+
* cpu_resume() will restore this saved state, and return. Because the
56+
* link-register is saved and restored, it will appear to return from this
57+
* function. So that the caller can tell the suspend/resume paths apart,
58+
* __cpu_suspend_enter() will always return a non-zero value, whereas the
59+
* path through cpu_resume() will return 0.
60+
*
61+
* x0 = struct sleep_stack_data area
6362
*/
6463
ENTRY(__cpu_suspend_enter)
65-
stp x29, lr, [sp, #-96]!
66-
stp x19, x20, [sp,#16]
67-
stp x21, x22, [sp,#32]
68-
stp x23, x24, [sp,#48]
69-
stp x25, x26, [sp,#64]
70-
stp x27, x28, [sp,#80]
71-
/*
72-
* Stash suspend finisher and its argument in x20 and x19
73-
*/
74-
mov x19, x0
75-
mov x20, x1
64+
stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
65+
stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
66+
stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
67+
stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
68+
stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
69+
stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
70+
71+
/* save the sp in cpu_suspend_ctx */
7672
mov x2, sp
77-
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
78-
mov x0, sp
79-
/*
80-
* x0 now points to struct cpu_suspend_ctx allocated on the stack
81-
*/
82-
str x2, [x0, #CPU_CTX_SP]
73+
str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
74+
75+
/* find the mpidr_hash */
8376
ldr x1, =sleep_save_sp
8477
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
8578
mrs x7, mpidr_el1
@@ -93,34 +86,11 @@ ENTRY(__cpu_suspend_enter)
9386
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
9487
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
9588
add x1, x1, x8, lsl #3
89+
90+
stp x29, lr, [sp, #-16]!
9691
bl __cpu_suspend_save
97-
/*
98-
* Grab suspend finisher in x20 and its argument in x19
99-
*/
100-
mov x0, x19
101-
mov x1, x20
102-
/*
103-
* We are ready for power down, fire off the suspend finisher
104-
* in x1, with argument in x0
105-
*/
106-
blr x1
107-
/*
108-
* Never gets here, unless suspend finisher fails.
109-
* Successful cpu_suspend should return from cpu_resume, returning
110-
* through this code path is considered an error
111-
* If the return value is set to 0 force x0 = -EOPNOTSUPP
112-
* to make sure a proper error condition is propagated
113-
*/
114-
cmp x0, #0
115-
mov x3, #-EOPNOTSUPP
116-
csel x0, x3, x0, eq
117-
add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
118-
ldp x19, x20, [sp, #16]
119-
ldp x21, x22, [sp, #32]
120-
ldp x23, x24, [sp, #48]
121-
ldp x25, x26, [sp, #64]
122-
ldp x27, x28, [sp, #80]
123-
ldp x29, lr, [sp], #96
92+
ldp x29, lr, [sp], #16
93+
mov x0, #1
12494
ret
12595
ENDPROC(__cpu_suspend_enter)
12696
.ltorg
@@ -150,12 +120,6 @@ cpu_resume_after_mmu:
150120
bl kasan_unpoison_remaining_stack
151121
#endif
152122
mov x0, #0 // return zero on success
153-
ldp x19, x20, [sp, #16]
154-
ldp x21, x22, [sp, #32]
155-
ldp x23, x24, [sp, #48]
156-
ldp x25, x26, [sp, #64]
157-
ldp x27, x28, [sp, #80]
158-
ldp x29, lr, [sp], #96
159123
ret
160124
ENDPROC(cpu_resume_after_mmu)
161125

@@ -172,6 +136,8 @@ ENTRY(cpu_resume)
172136
/* x7 contains hash index, let's use it to grab context pointer */
173137
ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
174138
ldr x0, [x0, x7, lsl #3]
139+
add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
140+
add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
175141
/* load sp from context */
176142
ldr x2, [x0, #CPU_CTX_SP]
177143
/* load physical address of identity map page table in x1 */
@@ -185,5 +151,12 @@ ENTRY(cpu_resume)
185151
* pointer and x1 to contain physical address of 1:1 page tables
186152
*/
187153
bl cpu_do_resume // PC relative jump, MMU off
154+
/* Can't access these by physical address once the MMU is on */
155+
ldp x19, x20, [x29, #16]
156+
ldp x21, x22, [x29, #32]
157+
ldp x23, x24, [x29, #48]
158+
ldp x25, x26, [x29, #64]
159+
ldp x27, x28, [x29, #80]
160+
ldp x29, lr, [x29]
188161
b cpu_resume_mmu // Resume MMU, never returns
189162
ENDPROC(cpu_resume)

arch/arm64/kernel/suspend.c

Lines changed: 42 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -10,22 +10,22 @@
1010
#include <asm/suspend.h>
1111
#include <asm/tlbflush.h>
1212

13-
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
13+
1414
/*
1515
* This is called by __cpu_suspend_enter() to save the state, and do whatever
1616
* flushing is required to ensure that when the CPU goes to sleep we have
1717
* the necessary data available when the caches are not searched.
1818
*
19-
* ptr: CPU context virtual address
19+
* ptr: sleep_stack_data containing cpu state virtual address.
2020
* save_ptr: address of the location where the context physical address
2121
* must be saved
2222
*/
23-
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
23+
void notrace __cpu_suspend_save(struct sleep_stack_data *ptr,
2424
phys_addr_t *save_ptr)
2525
{
2626
*save_ptr = virt_to_phys(ptr);
2727

28-
cpu_do_suspend(ptr);
28+
cpu_do_suspend(&ptr->system_regs);
2929
/*
3030
* Only flush the context that must be retrieved with the MMU
3131
* off. VA primitives ensure the flush is applied to all
@@ -51,6 +51,30 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
5151
hw_breakpoint_restore = hw_bp_restore;
5252
}
5353

54+
void notrace __cpu_suspend_exit(void)
55+
{
56+
/*
57+
* We are resuming from reset with the idmap active in TTBR0_EL1.
58+
* We must uninstall the idmap and restore the expected MMU
59+
* state before we can possibly return to userspace.
60+
*/
61+
cpu_uninstall_idmap();
62+
63+
/*
64+
* Restore per-cpu offset before any kernel
65+
* subsystem relying on it has a chance to run.
66+
*/
67+
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
68+
69+
/*
70+
* Restore HW breakpoint registers to sane values
71+
* before debug exceptions are possibly reenabled
72+
* through local_dbg_restore.
73+
*/
74+
if (hw_breakpoint_restore)
75+
hw_breakpoint_restore(NULL);
76+
}
77+
5478
/*
5579
* cpu_suspend
5680
*
@@ -60,8 +84,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
6084
*/
6185
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
6286
{
63-
int ret;
87+
int ret = 0;
6488
unsigned long flags;
89+
struct sleep_stack_data state;
6590

6691
/*
6792
* From this point debug exceptions are disabled to prevent
@@ -77,34 +102,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
77102
*/
78103
pause_graph_tracing();
79104

80-
/*
81-
* mm context saved on the stack, it will be restored when
82-
* the cpu comes out of reset through the identity mapped
83-
* page tables, so that the thread address space is properly
84-
* set-up on function return.
85-
*/
86-
ret = __cpu_suspend_enter(arg, fn);
87-
if (ret == 0) {
88-
/*
89-
* We are resuming from reset with the idmap active in TTBR0_EL1.
90-
* We must uninstall the idmap and restore the expected MMU
91-
* state before we can possibly return to userspace.
92-
*/
93-
cpu_uninstall_idmap();
94-
95-
/*
96-
* Restore per-cpu offset before any kernel
97-
* subsystem relying on it has a chance to run.
98-
*/
99-
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
105+
if (__cpu_suspend_enter(&state)) {
106+
/* Call the suspend finisher */
107+
ret = fn(arg);
100108

101109
/*
102-
* Restore HW breakpoint registers to sane values
103-
* before debug exceptions are possibly reenabled
104-
* through local_dbg_restore.
110+
* Never gets here, unless the suspend finisher fails.
111+
* Successful cpu_suspend() should return from cpu_resume(),
112+
* returning through this code path is considered an error
113+
* If the return value is set to 0 force ret = -EOPNOTSUPP
114+
* to make sure a proper error condition is propagated
105115
*/
106-
if (hw_breakpoint_restore)
107-
hw_breakpoint_restore(NULL);
116+
if (!ret)
117+
ret = -EOPNOTSUPP;
118+
} else {
119+
__cpu_suspend_exit();
108120
}
109121

110122
unpause_graph_tracing();

0 commit comments

Comments (0)