
Commit ed6aefe

Revert "ARCv2: spinlock/rwlock/atomics: Delayed retry of failed SCOND with exponential backoff"
This reverts commit e78fdfe. The issue was fixed in hardware in the HS2.1C release and there are no known external users of the affected RTL, so revert the whole delayed-retry series.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
1 parent 819f360

3 files changed: 4 additions, 340 deletions

arch/arc/Kconfig

Lines changed: 0 additions & 5 deletions
@@ -389,11 +389,6 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y

arch/arc/include/asm/atomic.h

Lines changed: 4 additions & 42 deletions
@@ -25,51 +25,17 @@
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay = 1, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bz	4f			\n"				\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	asl.f	%[delay], %[delay], 1	\n"	/* delay *= 2 */	\
-	"	mov.z	%[delay], 1		\n"	/* handle overflow */	\
-	"	b	1b			\n"	/* start over */	\
-	"4: ; --- success ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)				\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bnz     1b			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
+	unsigned int val;						\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -78,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
+	unsigned int val;						\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -91,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\

arch/arc/include/asm/spinlock.h

Lines changed: 0 additions & 293 deletions
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,294 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	asl.f	%[delay], %[delay], 1	\n"	/* delay *= 2 */	\
-	"	mov.z	%[delay], 1		\n"	/* handle overflow */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp]	"=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 * 	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 * 	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
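
For context on what was removed: the deleted #else branch retried a failed SCOND only after a busy-wait whose length doubles on every failure, as the deleted comment above describes. A plain-C sketch of that policy follows; it is illustrative only — the function name and the use of GCC __atomic builtins in place of the LLOCK/SCOND pair are assumptions, not kernel code.

/*
 * Plain-C sketch (illustrative, not kernel code) of the delayed-retry
 * policy the removed SCOND_FAIL_RETRY_* asm implemented: on a failed
 * exclusive store, busy-wait for 'delay' iterations, then double the
 * delay (resetting to 1 on overflow) before retrying.
 */
#include <stdbool.h>

static void backoff_spin_lock_sketch(unsigned int *slock,
				     unsigned int unlocked_val,
				     unsigned int locked_val)
{
	unsigned int delay = 1;

	for (;;) {
		unsigned int expected = unlocked_val;

		/* spin while the lock word reads as LOCKED (cheap read) */
		while (__atomic_load_n(slock, __ATOMIC_RELAXED) == locked_val)
			;

		/* attempt the exclusive update (stand-in for LLOCK/SCOND) */
		if (__atomic_compare_exchange_n(slock, &expected, locked_val,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;		/* acquired */

		/* failed: back off for 'delay' iterations ... */
		for (volatile unsigned int tmp = delay; tmp != 0; tmp--)
			;

		/* ... then double the delay, wrapping back to 1 on overflow */
		delay <<= 1;
		if (delay == 0)
			delay = 1;
	}
}

Under light contention the extra wait is a single iteration; only repeated SCOND failures grow the delay far enough for the coherency pipeline to drain, which is when a retry can finally succeed.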
