 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
  *
- * x0 = suspend finisher argument
- * x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ * x0 = struct sleep_stack_data area
  */
 ENTRY(__cpu_suspend_enter)
-	stp	x29, lr, [sp, #-96]!
-	stp	x19, x20, [sp, #16]
-	stp	x21, x22, [sp, #32]
-	stp	x23, x24, [sp, #48]
-	stp	x25, x26, [sp, #64]
-	stp	x27, x28, [sp, #80]
-	/*
-	 * Stash suspend finisher and its argument in x20 and x19
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+	stp	x19, x20, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+16]
+	stp	x21, x22, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+32]
+	stp	x23, x24, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+48]
+	stp	x25, x26, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+64]
+	stp	x27, x28, [x0, #SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+	/* save the sp in cpu_suspend_ctx */
 	mov	x2, sp
-	sub	sp, sp, #CPU_SUSPEND_SZ		// allocate cpu_suspend_ctx
-	mov	x0, sp
-	/*
-	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
-	 */
-	str	x2, [x0, #CPU_CTX_SP]
+	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+	/* find the mpidr_hash */
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
 	mrs	x7, mpidr_el1
@@ -93,34 +86,11 @@ ENTRY(__cpu_suspend_enter)
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
+
+	stp	x29, lr, [sp, #-16]!
 	bl	__cpu_suspend_save
-	/*
-	 * Grab suspend finisher in x20 and its argument in x19
-	 */
-	mov	x0, x19
-	mov	x1, x20
-	/*
-	 * We are ready for power down, fire off the suspend finisher
-	 * in x1, with argument in x0
-	 */
-	blr	x1
-	/*
-	 * Never gets here, unless suspend finisher fails.
-	 * Successful cpu_suspend should return from cpu_resume, returning
-	 * through this code path is considered an error
-	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
-	 * to make sure a proper error condition is propagated
-	 */
-	cmp	x0, #0
-	mov	x3, #-EOPNOTSUPP
-	csel	x0, x3, x0, eq
-	add	sp, sp, #CPU_SUSPEND_SZ		// rewind stack pointer
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
+	ldp	x29, lr, [sp], #16
+	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
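Note on the offsets used above: SLEEP_STACK_DATA_CALLEE_REGS and SLEEP_STACK_DATA_SYSTEM_REGS address two regions of the save area passed in x0. A minimal sketch of that layout is shown below; it is inferred from the stores in this hunk, and the field names, array sizes and the asm/suspend.h home are assumptions, not something this diff spells out.

	#include <linux/types.h>

	#define NR_CTX_REGS		10	/* assumed count of system regs saved by cpu_do_suspend */
	#define NR_CALLEE_SAVED_REGS	12	/* x29, lr, x19..x28, matching the stp sequence above */

	/* "system_regs" area; CPU_CTX_SP is the offset of ->sp within it */
	struct cpu_suspend_ctx {
		u64	ctx_regs[NR_CTX_REGS];
		u64	sp;
	} __aligned(16);

	/* area handed to __cpu_suspend_enter() in x0 */
	struct sleep_stack_data {
		struct cpu_suspend_ctx	system_regs;		/* SLEEP_STACK_DATA_SYSTEM_REGS */
		unsigned long		callee_saved_regs[NR_CALLEE_SAVED_REGS];
								/* SLEEP_STACK_DATA_CALLEE_REGS */
	};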
@@ -150,12 +120,6 @@ cpu_resume_after_mmu:
 	bl	kasan_unpoison_remaining_stack
 #endif
 	mov	x0, #0			// return zero on success
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
 	ret
 ENDPROC(cpu_resume_after_mmu)

@@ -172,6 +136,8 @@ ENTRY(cpu_resume)
 	/* x7 contains hash index, let's use it to grab context pointer */
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
+	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
 	/* load physical address of identity map page table in x1 */
@@ -185,5 +151,12 @@ ENTRY(cpu_resume)
 	 * pointer and x1 to contain physical address of 1:1 page tables
 	 */
 	bl	cpu_do_resume		// PC relative jump, MMU off
+	/* Can't access these by physical address once the MMU is on */
+	ldp	x19, x20, [x29, #16]
+	ldp	x21, x22, [x29, #32]
+	ldp	x23, x24, [x29, #48]
+	ldp	x25, x26, [x29, #64]
+	ldp	x27, x28, [x29, #80]
+	ldp	x29, lr, [x29]
 	b	cpu_resume_mmu		// Resume MMU, never returns
 ENDPROC(cpu_resume)
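To make the new return contract concrete: __cpu_suspend_enter() now returns non-zero on the suspend path and 0 when control comes back through cpu_resume(), so the finisher call moves to the C side. The sketch below shows roughly what such a caller looks like; the do_suspend() name is illustrative, and the assumption is that struct sleep_stack_data and the __cpu_suspend_enter() prototype are available from asm/suspend.h.

	#include <linux/errno.h>
	#include <asm/suspend.h>	/* assumed: struct sleep_stack_data, __cpu_suspend_enter() */

	static int do_suspend(unsigned long arg, int (*fn)(unsigned long))
	{
		struct sleep_stack_data state;	/* save area handed to the asm in x0 */
		int ret = 0;

		if (__cpu_suspend_enter(&state)) {
			/*
			 * Suspend path: the asm returned non-zero, so fire the
			 * finisher here in C. It is not expected to return; if
			 * it does, report failure rather than success.
			 */
			ret = fn(arg);
			if (!ret)
				ret = -EOPNOTSUPP;
		}
		/*
		 * Resume path: execution came back through cpu_resume(), which
		 * restored the callee-saved registers and made the call above
		 * appear to return 0, so ret is left at 0 here.
		 */

		return ret;
	}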