@@ -150,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
 
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST	"smc #0"
+#define SMCCC_HVC_INST	"hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST	__SMC(0)
+#define SMCCC_HVC_INST	__HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...) \
+	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
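
The two macros above are the usual preprocessor argument-counting trick: the caller's arguments push the constant list 7..0 to the right, and the tenth parameter selects the count. In the SMCCC wrappers the first argument is the function ID and the last is the result pointer, so the selected value is the number of arguments in between. A standalone sketch (not part of the patch; the macro bodies are copied from the hunk above, GNU C allows the empty variadic part just as the kernel does):

#include <stdio.h>

#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
#define __count_args(...) \
	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)

int main(void)
{
	/* Mirrors arm_smccc_1_1_smc(id, &res): no extra arguments. */
	printf("%d\n", __count_args("id", "res"));           /* prints 0 */
	/* Mirrors arm_smccc_1_1_smc(id, a1, a2, a3, &res). */
	printf("%d\n", __count_args("id", 1, 2, 3, "res"));  /* prints 3 */
	return 0;
}
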
+#define __constraint_write_0 \
+	"+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1 \
+	"+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2 \
+	"+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3 \
+	"+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4 __constraint_write_3
+#define __constraint_write_5 __constraint_write_4
+#define __constraint_write_6 __constraint_write_5
+#define __constraint_write_7 __constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4 "r" (r4)
+#define __constraint_read_5 __constraint_read_4, "r" (r5)
+#define __constraint_read_6 __constraint_read_5, "r" (r6)
+#define __constraint_read_7 __constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res) \
+	struct arm_smccc_res *___res = res; \
+	register u32 r0 asm("r0") = a0; \
+	register unsigned long r1 asm("r1"); \
+	register unsigned long r2 asm("r2"); \
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res) \
+	struct arm_smccc_res *___res = res; \
+	register u32 r0 asm("r0") = a0; \
+	register typeof(a1) r1 asm("r1") = a1; \
+	register unsigned long r2 asm("r2"); \
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res) \
+	struct arm_smccc_res *___res = res; \
+	register u32 r0 asm("r0") = a0; \
+	register typeof(a1) r1 asm("r1") = a1; \
+	register typeof(a2) r2 asm("r2") = a2; \
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res) \
+	struct arm_smccc_res *___res = res; \
+	register u32 r0 asm("r0") = a0; \
+	register typeof(a1) r1 asm("r1") = a1; \
+	register typeof(a2) r2 asm("r2") = a2; \
+	register typeof(a3) r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+	__declare_arg_3(a0, a1, a2, a3, res); \
+	register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+	__declare_arg_4(a0, a1, a2, a3, a4, res); \
+	register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+	__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+	register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+	register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count) \
+	: __constraint_write_ ## count \
+	: __constraint_read_ ## count \
+	: "memory"
+#define __constraints(count) ___constraints(count)
+
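
__declare_args/___declare_args and __constraints/___constraints come in pairs for the usual reason: the count passed in is itself a macro invocation (__count_args(...)), and an argument that is an operand of ## is pasted without being expanded first. The extra level of indirection forces the expansion before the paste. A standalone sketch of the idiom, with hypothetical names, not part of the patch:

#include <stdio.h>

#define ___count(_0, _1, _2, _3, x, ...) x
#define __count(...) ___count(__VA_ARGS__, 3, 2, 1, 0)

#define PASTE_DIRECT(n)   handler_ ## n    /* pastes the literal token it was given */
#define PASTE_EXPANDED(n) PASTE_DIRECT(n)  /* extra level: n is expanded first */

static void handler_2(void) { puts("two extra arguments"); }

int main(void)
{
	/* PASTE_DIRECT(__count("id", 'a', 'b')) would produce a call to a
	 * nonexistent handler___count(); the indirect form resolves to
	 * handler_2(). */
	PASTE_EXPANDED(__count("id", 'a', 'b'))();
	return 0;
}
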
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+	do { \
+		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+		asm volatile(inst "\n" \
+			     __constraints(__count_args(__VA_ARGS__))); \
+		if (___res) \
+			*___res = (typeof(*___res)){r0, r1, r2, r3}; \
+	} while (0)
+
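
Putting the pieces together, this is roughly what arm_smccc_1_1_smc(fn_id, &res) expands to on arm64 once __count_args() has evaluated to 0. This is a hand-expanded sketch, not part of the patch: it needs an AArch64 compiler, and executing the SMC requires running at a privileged exception level with SMCCC firmware behind it.

typedef unsigned int u32;	/* normally from <linux/types.h> */

/* Mirrors the arm_smccc_res definition earlier in this header. */
struct arm_smccc_res {
	unsigned long a0, a1, a2, a3;
};

static void smccc_call_sketch(u32 fn_id, struct arm_smccc_res *res)
{
	do {
		/* __declare_arg_0(fn_id, res) */
		struct arm_smccc_res *___res = res;
		register u32 r0 asm("r0") = fn_id;
		register unsigned long r1 asm("r1");
		register unsigned long r2 asm("r2");
		register unsigned long r3 asm("r3");

		/* SMCCC_SMC_INST plus __constraints(0): r0 is read and
		 * written, r1-r3 are clobbered outputs, no extra inputs. */
		asm volatile("smc #0\n"
			     : "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
			     :
			     : "memory");

		if (___res)
			*___res = (typeof(*___res)){r0, r1, r2, r3};
	} while (0);
}
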
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the contents of
+ * registers 0 to 3 on return from the SMC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
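
A sketch of a typical caller, in the style of the arm64 Spectre-v2 mitigation code this series enables: probe whether the firmware implements ARM_SMCCC_ARCH_WORKAROUND_1 when the conduit is SMC. Illustrative only; it assumes kernel context and the ARM_SMCCC_* function-ID constants defined in this header and its companion patches, with error handling trimmed.

#include <linux/arm-smccc.h>
#include <linux/printk.h>

static void check_workaround_1(void)
{
	struct arm_smccc_res res;

	/* Two arguments plus &res: __count_args() yields 1, so
	 * __declare_arg_1() binds the queried feature ID to r1. */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	if ((int)res.a0 >= 0)
		pr_info("ARCH_WORKAROUND_1 is implemented by firmware\n");
}
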
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the HVC instruction. The return values are updated with the contents of
+ * registers 0 to 3 on return from the HVC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
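
The HVC flavour is used when the firmware interface is reached through a hypervisor, for example in a KVM guest. Which conduit (SMC or HVC) is correct is discovered via PSCI and is outside the scope of this header. A minimal sketch, assuming the ARM_SMCCC_VERSION_FUNC_ID constant from this header and kernel context; not part of the patch:

#include <linux/arm-smccc.h>
#include <linux/printk.h>

static void report_smccc_version(void)
{
	struct arm_smccc_res res;

	/* One argument plus &res: count 0, only r0 carries data in. */
	arm_smccc_1_1_hvc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	pr_info("SMCCC version: 0x%lx\n", res.a0);
}
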
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/