@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
* have the AQ or RL bits set. These don't return anything, so there's only
* one version to worry about.
*/
- #define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
- static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
- { \
- 	__asm__ __volatile__ ( \
- 		"amo" #asm_op "." #asm_type " zero, %1, %0" \
- 		: "+A" (v->counter) \
- 		: "r" (I) \
- 		: "memory"); \
+ #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
+ static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+ { \
+ 	__asm__ __volatile__ ( \
+ 		"amo" #asm_op "." #asm_type " zero, %1, %0" \
+ 		: "+A" (v->counter) \
+ 		: "r" (I) \
+ 		: "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
- #define ATOMIC_OPS(op, asm_op, c_op, I) \
- 	ATOMIC_OP (op, asm_op, c_op, I, w, int, )
+ #define ATOMIC_OPS(op, asm_op, I) \
+ 	ATOMIC_OP (op, asm_op, I, w, int, )
#else
- #define ATOMIC_OPS(op, asm_op, c_op, I) \
- 	ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \
- 	ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+ #define ATOMIC_OPS(op, asm_op, I) \
+ 	ATOMIC_OP (op, asm_op, I, w, int, ) \
+ 	ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

- ATOMIC_OPS(add, add, +,  i)
- ATOMIC_OPS(sub, add, +, -i)
- ATOMIC_OPS(and, and, &,  i)
- ATOMIC_OPS( or,  or, |,  i)
- ATOMIC_OPS(xor, xor, ^,  i)
+ ATOMIC_OPS(add, add,  i)
+ ATOMIC_OPS(sub, add, -i)
+ ATOMIC_OPS(and, and,  i)
+ ATOMIC_OPS( or,  or,  i)
+ ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
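
For reference, a sketch of what the updated ATOMIC_OPS(add, add, i) expands to on a !CONFIG_GENERIC_ATOMIC64 build, reconstructed from the macro bodies above (whitespace and comments are added here):

/* 32-bit op from ATOMIC_OP(add, add, i, w, int, ): a non-returning atomic
 * add done with a single amoadd.w; the "zero" destination discards the old
 * value. */
static __always_inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__ (
		"amoadd.w zero, %1, %0"
		: "+A" (v->counter)	/* read-write memory operand */
		: "r" (i)
		: "memory");
}

/* 64-bit op from ATOMIC_OP(add, add, i, d, long, 64). */
static __always_inline void atomic64_add(long i, atomic64_t *v)
{
	__asm__ __volatile__ (
		"amoadd.d zero, %1, %0"
		: "+A" (v->counter)
		: "r" (i)
		: "memory");
}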
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^, i)
* There's two flavors of these: the arithmatic ops have both fetch and return
* versions, while the logical ops only have fetch versions.
*/
- #define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
+ #define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
{ \
	register c_type ret; \
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

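The asymmetry here is the point of the change: ATOMIC_FETCH_OP no longer needs c_op, while ATOMIC_OP_RETURN (whose body is outside these hunks) still takes it, presumably because it combines the fetched value with I using c_op. A hypothetical expansion for the fully ordered add, assuming that structure:

/* Hypothetical sketch only; ATOMIC_OP_RETURN is not shown in this diff. */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	return atomic_fetch_add(i, v) + i;	/* c_op ("+") combines old value with i */
}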
@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl, )
#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
- #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, )
+ #define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, )
#else
- #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+ #define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64)
#endif

- ATOMIC_OPS(and, and, &, i, , _relaxed)
- ATOMIC_OPS(and, and, &, i, .aq, _acquire)
- ATOMIC_OPS(and, and, &, i, .rl, _release)
- ATOMIC_OPS(and, and, &, i, .aqrl, )
+ ATOMIC_OPS(and, and, i, , _relaxed)
+ ATOMIC_OPS(and, and, i, .aq, _acquire)
+ ATOMIC_OPS(and, and, i, .rl, _release)
+ ATOMIC_OPS(and, and, i, .aqrl, )

- ATOMIC_OPS( or,  or, |, i, , _relaxed)
- ATOMIC_OPS( or,  or, |, i, .aq, _acquire)
- ATOMIC_OPS( or,  or, |, i, .rl, _release)
- ATOMIC_OPS( or,  or, |, i, .aqrl, )
+ ATOMIC_OPS( or,  or, i, , _relaxed)
+ ATOMIC_OPS( or,  or, i, .aq, _acquire)
+ ATOMIC_OPS( or,  or, i, .rl, _release)
+ ATOMIC_OPS( or,  or, i, .aqrl, )

- ATOMIC_OPS(xor, xor, ^, i, , _relaxed)
- ATOMIC_OPS(xor, xor, ^, i, .aq, _acquire)
- ATOMIC_OPS(xor, xor, ^, i, .rl, _release)
- ATOMIC_OPS(xor, xor, ^, i, .aqrl, )
+ ATOMIC_OPS(xor, xor, i, , _relaxed)
+ ATOMIC_OPS(xor, xor, i, .aq, _acquire)
+ ATOMIC_OPS(xor, xor, i, .rl, _release)
+ ATOMIC_OPS(xor, xor, i, .aqrl, )

#undef ATOMIC_OPS

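As an illustration, an invocation such as ATOMIC_OPS(and, and, i, .aq, _acquire) should now generate roughly the function below. The full ATOMIC_FETCH_OP body is only partially visible in the hunks above, so this is a sketch that follows the AMO pattern of ATOMIC_OP, with the asm_or suffix appended to the mnemonic:

static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v)
{
	register int ret;

	__asm__ __volatile__ (
		"amoand.w.aq %1, %2, %0"	/* ".aq" comes from the asm_or argument */
		: "+A" (v->counter), "=r" (ret)	/* ret receives the old value */
		: "r" (i)
		: "memory");
	return ret;
}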
@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add, <, 0)
#undef ATOMIC_OP
#undef ATOMIC_OPS

- #define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
+ #define ATOMIC_OP(op, func_op, I, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
	atomic##prefix##_##func_op(I, v); \
}

- #define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
+ #define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##func_op(I, v); \
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
- 	ATOMIC_OP (op, asm_op, c_op, I, int, ) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+ 	ATOMIC_OP (op, asm_op, I, int, ) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
- 	ATOMIC_OP (op, asm_op, c_op, I, int, ) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+ 	ATOMIC_OP (op, asm_op, I, int, ) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
- 	ATOMIC_OP (op, asm_op, c_op, I, long, 64) \
- 	ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
+ 	ATOMIC_OP (op, asm_op, I, long, 64) \
+ 	ATOMIC_FETCH_OP (op, asm_op, I, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

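These wrapper macros build the unary helpers by delegating to the binary ops with a fixed constant. Assuming an instantiation such as ATOMIC_OPS(inc, add, +, 1) elsewhere in the file (the instantiation lines fall outside these hunks), the int variants would expand roughly to:

static __always_inline void atomic_inc(atomic_t *v)
{
	atomic_add(1, v);		/* ATOMIC_OP: delegate to the AMO-based op */
}

static __always_inline int atomic_fetch_inc(atomic_t *v)
{
	return atomic_fetch_add(1, v);	/* ATOMIC_FETCH_OP: reuse the fetch op */
}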
@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)

/*
* atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier. We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+ /*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense. We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
*/
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
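
To make the FIXME concrete: the cmpxchg bodies themselves are not part of this diff, but the pattern it describes, putting the same ordering suffix on both the LR and the SC of the retry loop, would look roughly like the hypothetical 32-bit acquire sketch below (the name and constraints are illustrative, not the kernel's actual __cmpxchg):

static __always_inline int cmpxchg32_acquire_sketch(int *p, int old, int new)
{
	int ret, rc;

	__asm__ __volatile__ (
		"0:	lr.w.aq %0, %2\n"	/* load-reserved with acquire */
		"	bne	%0, %3, 1f\n"
		"	sc.w.aq	%1, %4, %2\n"	/* store-conditional, same barrier */
		"	bnez	%1, 0b\n"
		"1:\n"
		: "=&r" (ret), "=&r" (rc), "+A" (*p)
		: "r" (old), "r" (new)
		: "memory");
	return ret;
}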