clang 22.0.0git
arm_acle.h
/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 * The Arm C Language Extensions specifications can be found in the following
 * link: https://github.com/ARM-software/acle/releases
 *
 * The ACLE section numbers are subject to change. When consulting the
 * specifications, it is recommended to search using section titles if
 * the section numbers look outdated.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 7.3 Memory barriers */
void __dmb(unsigned int);
void __dsb(unsigned int);
void __isb(unsigned int);
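
/*
 * Usage sketch (illustrative, not part of the header): publish data to
 * another observer. The argument selects the barrier domain; 0xF is the
 * full-system (SY) barrier.
 *
 *   extern volatile int __data, __flag;   // hypothetical shared variables
 *   __data = 42;
 *   __dmb(0xF);   // order the data store before the flag store
 *   __flag = 1;
 */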

/* 7.4 Hints */
void __wfi(void);
void __wfe(void);
void __sev(void);
void __sevl(void);
void __yield(void);

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define _CHKFEAT_GCS 1
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__chkfeat(uint64_t __features) {
  return __builtin_arm_chkfeat(__features) ^ __features;
}
#endif

/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t __v;
#if (__ARM_FEATURE_LDREX & 4) || __ARM_ARCH_6M__ || __linux__
  /*
   * Using this clang builtin is sensible in most situations. Where
   * LDREX and STREX are available, it will compile to a loop using
   * them. Otherwise it will compile to a libcall, requiring the
   * runtime to provide that library function.
   *
   * That's unavoidable on Armv6-M, which has no atomic instructions
   * at all (not even SWP), so in that situation the user will just
   * have to provide an implementation of __atomic_exchange_4 (perhaps
   * it would temporarily disable interrupts, and then do a separate
   * load and store).
   *
   * We also use the libcall strategy on pre-Armv7 Linux targets, on
   * the theory that Linux's runtime support library _will_ provide a
   * suitable libcall, and it's better to use that than the SWP
   * instruction because then when the same binary is run on a later
   * Linux system the libcall implementation will use LDREX instead.
   */
  __v = __atomic_exchange_n(__p, __x, __ATOMIC_RELAXED);
#else
  /*
   * But for older Arm architectures when the target is not Linux, we
   * fall back to using the SWP instruction via inline assembler. ACLE
   * is clear that we're allowed to do this, but shouldn't do it if we
   * have a better alternative.
   */
  __asm__("swp %0, %1, [%2]" : "=r"(__v) : "r"(__x), "r"(__p) : "memory");
#endif
  return __v;
}
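
/*
 * Usage sketch (illustrative): a crude spinlock built on __swp, similar
 * in spirit to the example in the ACLE specification. New code should
 * normally prefer the C11/C++11 atomics that this function lowers to.
 *
 *   static void __lock(volatile uint32_t *__l)   { while (__swp(1, __l)); }
 *   static void __unlock(volatile uint32_t *__l) { __swp(0, __l); }
 */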

/* 7.6 Memory prefetch intrinsics */
/* 7.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

/* 7.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif
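
/*
 * Usage sketch (illustrative): prefetch a hypothetical buffer ahead of a
 * store. Arguments: access_kind (0 = read, 1 = write), cache_level
 * (0 = L1), retention_policy (0 = temporal).
 *
 *   extern char __buf[64];
 *   __pldx(1, 0, 0, __buf);   // data prefetch for writing, into L1
 *   __pli(__buf);             // instruction prefetch of the same address
 */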

/* 7.7 NOP */
#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__))
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
  __builtin_arm_nop();
}
#endif

/* 8 DATA-PROCESSING INTRINSICS */
/* 8.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}
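
/*
 * Worked examples (illustrative): the rotation count is taken modulo the
 * operand width, so rotating by 0 or by the full width returns the input.
 *
 *   __ror(0x12345678u, 8) == 0x78123456u
 *   __rorll(1ULL, 1)      == 0x8000000000000000ULL
 *   __ror(__x, 32)        == __x
 */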

/* CLZ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  return __builtin_arm_clz(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_clz(__t);
#else
  return __builtin_arm_clz64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  return __builtin_arm_clz64(__t);
}

/* CLS */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
  return __builtin_arm_cls(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_cls(__t);
#else
  return __builtin_arm_cls64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
  return __builtin_arm_cls64(__t);
}
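
/*
 * Worked examples (illustrative): unlike the generic C builtins, the Arm
 * CLZ instruction is well defined for a zero input.
 *
 *   __clz(0u)          == 32
 *   __clz(1u)          == 31
 *   __cls(0x0000FFFFu) == 15   // leading bits equal to the sign bit,
 *   __cls(0xFFFFFFFFu) == 31   // not counting the sign bit itself
 */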

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return (int16_t)__builtin_bswap16((uint16_t)__t);
}
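
/*
 * Worked examples (illustrative):
 *
 *   __rev(0x11223344u)   == 0x44332211u   // reverse all four bytes
 *   __rev16(0x11223344u) == 0x22114433u   // swap bytes in each halfword
 *   __revsh((int16_t)0x0080)              // == (int16_t)0x8000 == -32768
 */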

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}

/* 8.3 16-bit multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif
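
/*
 * Worked examples (illustrative): the suffix names the halfword taken
 * from each operand (b = bottom 16 bits, t = top 16 bits), so with
 * __a = 0x00020003 (top 2, bottom 3) and __b = 0x00040005:
 *
 *   __smulbb(__a, __b) == 15   // 3 * 5
 *   __smulbt(__a, __b) == 12   // 3 * 4
 *   __smultt(__a, __b) == 8    // 2 * 4
 */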

/*
 * 8.4 Saturating intrinsics
 *
 * FIXME: Change the guards to the corresponding __ARM_FEATURE flags when
 * the Q-flag intrinsics are implemented and the flag is enabled.
 */
/* 8.4.1 Width-specified saturation intrinsics */
#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif

/* 8.4.2 Saturating addition and subtraction intrinsics */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif
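
/*
 * Worked examples (illustrative): results clamp to the representable
 * range instead of wrapping, setting the Q flag on saturation.
 *
 *   __ssat(131072, 16)   == 32767       // signed 16-bit range
 *   __usat(-1, 8)        == 0           // unsigned 8-bit range
 *   __qadd(INT32_MAX, 1) == INT32_MAX
 *   __qdbl(0x60000000)   == INT32_MAX   // doubling would overflow
 */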

/* 8.4.3 Accumulating multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif
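
/*
 * Worked example (illustrative): __smlabb multiplies the bottom halfwords
 * and adds the accumulator, setting the Q flag if the addition overflows.
 *
 *   __smlabb(3, 5, 10) == 25   // 3 * 5 + 10
 */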

/* 8.5.4 Parallel 16-bit saturation */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 8.5.5 Packing and unpacking */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif
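
/*
 * Worked example (illustrative): __sxtb16 sign-extends bytes 0 and 2 of
 * its operand into the two halfwords of the result.
 *
 *   __sxtb16(0x0081007F) == 0xFF81007F   // 0x81 -> 0xFF81, 0x7F -> 0x007F
 */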

/* 8.5.6 Parallel selection */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_sel(__a, __b);
}
#endif

/* 8.5.7 Parallel 8-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif
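
/*
 * Worked examples (illustrative): each byte lane is computed
 * independently, with the q* variants saturating and h* variants halving.
 *
 *   __uqadd8(0xFF010203u, 0x01010101u) == 0xFF020304u   // 0xFF + 1 clamps
 *
 * __sel picks each result byte from its first operand where the matching
 * GE flag was set by a preceding parallel add/subtract:
 *
 *   __uadd8(__a, __b);                 // sets GE[3:0] from the byte carries
 *   uint8x4_t __m = __sel(__x, __y);   // per-byte select on those flags
 */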

/* 8.5.8 Sum of 8-bit absolute differences */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif
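
/*
 * Worked example (illustrative): sum of per-byte absolute differences.
 *
 *   __usad8(0x01020304u, 0x04030201u) == 8   // |1-4|+|2-3|+|3-2|+|4-1|
 *   __usada8(__a, __b, __acc)                // the same sum plus __acc
 */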

/* 8.5.9 Parallel 16-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif
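
/*
 * Worked examples (illustrative): the halfword lanes are independent, and
 * the asx/sax forms exchange the halfwords of the second operand first.
 *
 *   __sadd16(0x00010002, 0x00030004) == 0x00040006
 *   __sasx(__a, __b)   // top: a.top + b.bottom, bottom: a.bottom - b.top
 */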

/* 8.5.10 Parallel 16-bit multiplication */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
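
/*
 * Worked examples (illustrative): with __a = 0x00020003 (halfwords {3, 2})
 * and __b = 0x00040005 (halfwords {5, 4}):
 *
 *   __smuad(__a, __b)      == 23    // 3*5 + 2*4, a two-element dot product
 *   __smusd(__a, __b)      == 7     // 3*5 - 2*4
 *   __smlad(__a, __b, 100) == 123   // dot product plus accumulator
 */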

/* 8.6 Floating-point data-processing intrinsics */
#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) && \
     (__ARM_FEATURE_DIRECTED_ROUNDING)) && \
    (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ double __attribute__((__always_inline__, __nodebug__))
__rintn(double __a) {
  return __builtin_roundeven(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__))
__rintnf(float __a) {
  return __builtin_roundevenf(__a);
}
#endif
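
/*
 * Worked examples (illustrative): rounds to nearest with ties to even,
 * independent of the current rounding mode.
 *
 *   __rintn(2.5)    == 2.0
 *   __rintn(3.5)    == 4.0
 *   __rintnf(-0.5f) == -0.0f
 */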

/* 8.8 CRC32 intrinsics */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
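
/*
 * Usage sketch (illustrative): byte-wise CRC-32 over a hypothetical
 * buffer __buf of length __len. Production code would consume word-sized
 * chunks with __crc32w/__crc32d and use __crc32b only for the tail.
 *
 *   uint32_t __crc = 0xFFFFFFFFu;
 *   for (unsigned __i = 0; __i < __len; ++__i)
 *     __crc = __crc32b(__crc, __buf[__i]);
 *   __crc = ~__crc;
 */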

/* 8.6 Floating-point data-processing intrinsics */
/* Armv8.3-A Javascript conversion intrinsic */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
__jcvt(double __a) {
  return __builtin_arm_jcvt(__a);
}
#endif

/* Armv8.5-A FP rounding intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32zf(float __a) {
  return __builtin_arm_rint32zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32z(double __a) {
  return __builtin_arm_rint32z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64zf(float __a) {
  return __builtin_arm_rint64zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64z(double __a) {
  return __builtin_arm_rint64z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32xf(float __a) {
  return __builtin_arm_rint32xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32x(double __a) {
  return __builtin_arm_rint32x(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64xf(float __a) {
  return __builtin_arm_rint64xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64x(double __a) {
  return __builtin_arm_rint64x(__a);
}
#endif
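
/*
 * Worked example (illustrative): __rint32z rounds toward zero to a value
 * representable as an int32_t; inputs outside that range are expected to
 * produce -2^31 and raise Invalid Operation.
 *
 *   __rint32z(1.7)  == 1.0
 *   __rint32z(3e10) == -2147483648.0
 */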

/* 8.9 Armv8.7-A load/store 64-byte intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
  uint64_t val[8];
} data512_t;

static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_ld64b(const void *__addr) {
  data512_t __value;
  __builtin_arm_ld64b(__addr, __value.val);
  return __value;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64b(void *__addr, data512_t __value) {
  __builtin_arm_st64b(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv0(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv0(__addr, __value.val);
}
#endif
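
/*
 * Usage sketch (illustrative): a single-copy-atomic 64-byte transfer. The
 * pointers are hypothetical and must be 64-byte aligned and of a memory
 * type that supports the LD64B/ST64B instructions.
 *
 *   data512_t __blk = __arm_ld64b(__dev_src);
 *   __arm_st64b(__dev_dst, __blk);
 *   uint64_t __st = __arm_st64bv(__dev_dst, __blk);   // returns a status
 */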

/* 11.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
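
/*
 * Usage sketch (illustrative): the sysreg argument is a constant string
 * naming the register.
 *
 *   uint64_t __ticks = __arm_rsr64("cntvct_el0");   // virtual counter
 *   __arm_wsr64("tpidr_el0", 0ULL);                 // thread ID register
 */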

/* 10.3 MTE intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)

/* 18 memcpy family of operations intrinsics - MOPS */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
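
/*
 * Usage sketch (illustrative): tag a granule and derive a matching
 * pointer, in the spirit of the examples in the ACLE MTE section.
 *
 *   extern void *__p;                                 // 16-byte aligned
 *   void *__t = __arm_mte_create_random_tag(__p, 0);  // choose a tag
 *   __arm_mte_set_tag(__t);                           // tag the granule
 *   // accesses through __t are now checked against the allocation tag
 */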

/* 11.3 Coprocessor Intrinsics */
#if defined(__ARM_FEATURE_COPROC)

#if (__ARM_FEATURE_COPROC & 0x1)

#if (__ARM_ARCH < 8)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#endif /* __ARM_ARCH < 8 */

#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p)
#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p)

#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2)

#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH < 8) */

#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* __ARM_ARCH_8M_MAIN__ || __ARM_ARCH_8_1M_MAIN__ */

#endif /* __ARM_FEATURE_COPROC & 0x1 */

#if (__ARM_FEATURE_COPROC & 0x2)
#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p)
#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p)
#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p)
#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p)
#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2)
#endif

#if (__ARM_FEATURE_COPROC & 0x4)
#define __arm_mcrr(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr(coproc, opc1, value, CRm)
#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm)
#endif

#if (__ARM_FEATURE_COPROC & 0x8)
#define __arm_mcrr2(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr2(coproc, opc1, value, CRm)
#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm)
#endif

#endif /* __ARM_FEATURE_COPROC */
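
/*
 * Usage sketch (illustrative, AArch32): read MIDR through CP15. Every
 * operand other than the transferred value must be an integer constant.
 *
 *   uint32_t __midr = __arm_mrc(15, 0, 0, 0, 0);   // coproc 15, c0, c0, 0
 */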

/* 17 Transactional Memory Extension (TME) Intrinsics */
#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME

#define _TMFAILURE_REASON 0x00007fffu
#define _TMFAILURE_RTRY 0x00008000u
#define _TMFAILURE_CNCL 0x00010000u
#define _TMFAILURE_MEM 0x00020000u
#define _TMFAILURE_IMP 0x00040000u
#define _TMFAILURE_ERR 0x00080000u
#define _TMFAILURE_SIZE 0x00100000u
#define _TMFAILURE_NEST 0x00200000u
#define _TMFAILURE_DBG 0x00400000u
#define _TMFAILURE_INT 0x00800000u
#define _TMFAILURE_TRIVIAL 0x01000000u

#define __tstart() __builtin_arm_tstart()
#define __tcommit() __builtin_arm_tcommit()
#define __tcancel(__arg) __builtin_arm_tcancel(__arg)
#define __ttest() __builtin_arm_ttest()

#endif /* __ARM_FEATURE_TME */
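
/*
 * Usage sketch (illustrative): the canonical start/commit pattern. A zero
 * return from __tstart means the transaction started; otherwise the value
 * encodes the failure cause, testable with the _TMFAILURE_* masks.
 *
 *   uint64_t __status = __tstart();
 *   if (__status == 0) {
 *     // transactional work goes here
 *     __tcommit();
 *   } else if (!(__status & _TMFAILURE_RTRY)) {
 *     // permanent failure: take a non-transactional fallback path
 *   }
 */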

/* 8.7 Armv8.5-A Random number generation intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
  return __builtin_arm_rndr(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndrrs(uint64_t *__p) {
  return __builtin_arm_rndrrs(__p);
}
#endif
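
/*
 * Usage sketch (illustrative): both intrinsics return 0 on success and
 * nonzero when no entropy was available, so callers typically retry.
 *
 *   uint64_t __r;
 *   while (__rndr(&__r) != 0)
 *     ;   // retry until the generator delivers a value
 */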

/* 11.2 Guarded Control Stack intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ void * __attribute__((__always_inline__, __nodebug__))
__gcspr() {
  return (void *)__builtin_arm_rsr64("gcspr_el0");
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs")))
__gcspopm() {
  return __builtin_arm_gcspopm(0);
}

static __inline__ void *__attribute__((__always_inline__, __nodebug__,
                                       target("gcs")))
__gcsss(void *__stack) {
  return __builtin_arm_gcsss(__stack);
}
#endif

#if defined(__cplusplus)
}
#endif

#endif /* __ARM_ACLE_H */