
Commit fbffe89

Peter Zijlstra authored and Ingo Molnar committed
locking/atomic, arch/arc: Implement atomic_fetch_{add,sub,and,andnot,or,xor}()
Implement FETCH-OP atomic primitives; these are very similar to the existing OP-RETURN primitives we already have, except they return the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as bitops -- because for those it becomes impossible to reconstruct the state prior to modification.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-snps-arc@lists.infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
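The difference from the OP-RETURN forms is easiest to see at a call site: the returned value lets the caller see what an irreversible operation destroyed. A minimal usage sketch (the helper name and mask are hypothetical, and it assumes the generic int atomic_fetch_andnot(int i, atomic_t *v) signature that returns the old value):

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Hypothetical caller: atomically clear a flag bit and report whether it
 * was set beforehand. With only the atomic_andnot()/OP-RETURN forms the
 * pre-modification state cannot be reconstructed from the result.
 */
static inline bool example_test_and_clear_flag(atomic_t *flags, int mask)
{
	int old = atomic_fetch_andnot(mask, flags);	/* value _before_ the and-not */

	return (old & mask) != 0;
}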
1 parent 1f51dee commit fbffe89

1 file changed, +94 −9 lines changed

arch/arc/include/asm/atomic.h

Lines changed: 94 additions & 9 deletions
@@ -67,6 +67,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return val;					\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	unsigned int val, orig;					\
+	SCOND_FAIL_RETRY_VAR_DEF				\
+								\
+	/*							\
+	 * Explicit full memory barrier needed before/after as	\
+	 * LLOCK/SCOND thmeselves don't provide any such semantics	\
+	 */							\
+	smp_mb();						\
+								\
+	__asm__ __volatile__(					\
+	"1:	llock   %[orig], [%[ctr]]		\n"	\
+	"	" #asm_op " %[val], %[orig], %[i]	\n"	\
+	"	scond   %[val], [%[ctr]]		\n"	\
+	"						\n"	\
+	SCOND_FAIL_RETRY_ASM					\
+								\
+	: [val]	"=&r"	(val),					\
+	  [orig] "=&r" (orig)					\
+	  SCOND_FAIL_RETRY_VARS					\
+	: [ctr]	"r"	(&v->counter),				\
+	  [i]	"ir"	(i)					\
+	: "cc");						\
+								\
+	smp_mb();						\
+								\
+	return orig;						\
+}
+
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 #ifndef CONFIG_SMP
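For orientation: in the LLOCK/SCOND loop above, orig holds the value loaded by llock, asm_op computes the new value into val, and the sequence retries until scond succeeds, so the function returns the pre-modification value with full barriers on both sides. A rough user-space analogy of that returned-old-value semantics (an illustrative sketch assuming GCC/Clang __atomic builtins, not the kernel implementation):

/* Illustration only: a fully-ordered fetch-add that returns the old value. */
#include <stdio.h>

static int counter = 40;

int main(void)
{
	/* __atomic_fetch_add() returns what the object held before the add. */
	int orig = __atomic_fetch_add(&counter, 2, __ATOMIC_SEQ_CST);

	printf("before: %d, after: %d\n", orig, counter);	/* before: 40, after: 42 */
	return 0;
}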
@@ -129,21 +160,46 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;					\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	unsigned long flags;					\
+	unsigned long orig;					\
+								\
+	/*							\
+	 * spin lock/unlock provides the needed smp_mb() before/after	\
+	 */							\
+	atomic_ops_lock(flags);					\
+	orig = v->counter;					\
+	v->counter c_op i;					\
+	atomic_ops_unlock(flags);				\
+								\
+	return orig;						\
+}
+
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
 #define ATOMIC_OPS(op, c_op, asm_op)					\
 	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_andnot atomic_andnot
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_fetch_or atomic_fetch_or
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #undef SCOND_FAIL_RETRY_VAR_DEF
 #undef SCOND_FAIL_RETRY_ASM
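Hand-expanding the spin-lock based ATOMIC_FETCH_OP() above for one instantiation, ATOMIC_OPS(or, |=, or), gives the following shape (written out purely for illustration; it relies on the same atomic_ops_lock()/atomic_ops_unlock() helpers the other !LLSC ops in this file already use):

static inline int atomic_fetch_or(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long orig;

	/*
	 * spin lock/unlock provides the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	orig = v->counter;	/* snapshot the value before modification */
	v->counter |= i;	/* c_op is |= for the "or" instantiation */
	atomic_ops_unlock(flags);

	return orig;
}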
@@ -208,22 +264,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;					\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	unsigned int temp = i;					\
+								\
+	/* Explicit full memory barrier needed before/after */	\
+	smp_mb();						\
+								\
+	__asm__ __volatile__(					\
+	"	mov r2, %0\n"					\
+	"	mov r3, %1\n"					\
+	"	.word %2\n"					\
+	"	mov %0, r2"					\
+	: "+r"(temp)						\
+	: "r"(&v->counter), "i"(asm_op)				\
+	: "r2", "r3", "memory");				\
+								\
+	smp_mb();						\
+								\
+	return temp;						\
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)					\
 	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 #define atomic_sub(i, v) atomic_add(-(i), (v))
 #define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
 
-ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
 #define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
 #endif	/* CONFIG_ARC_PLAT_EZNPS */
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
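Taken together, and as the subject line states, arch/arc now provides the fetch-style primitives below; the prototype shape follows the generic atomic API, and each returns the value the counter held before the operation (listed here for orientation only):

int atomic_fetch_add(int i, atomic_t *v);
int atomic_fetch_sub(int i, atomic_t *v);
int atomic_fetch_and(int i, atomic_t *v);
int atomic_fetch_andnot(int i, atomic_t *v);
int atomic_fetch_or(int i, atomic_t *v);
int atomic_fetch_xor(int i, atomic_t *v);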
