Commit 470ada6

Author: Martin Schwidefsky <schwidefsky@de.ibm.com>

s390/spinlock: refactor arch_spin_lock_wait[_flags]

Reorder the spinlock wait code to make it more readable.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
1 parent 939c5ae commit 470ada6

File tree

1 file changed: +47 −34 lines changed


arch/s390/lib/spinlock.c

Lines changed: 47 additions & 34 deletions
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
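
To make the reordered control flow easier to follow outside the kernel, here is a minimal userspace sketch of the same wait-loop structure, assuming C11 atomics. It is not the kernel code: sched_yield() stands in for smp_yield_cpu(), the smp_vcpu_scheduled() owner check is omitted because it has no userspace equivalent, and the names my_spinlock, my_spin_lock_wait and MY_SPIN_RETRY are hypothetical.

/*
 * Hedged sketch, not the s390 kernel code: a userspace analogue of the
 * refactored wait loop.  sched_yield() replaces smp_yield_cpu(), the
 * "is the owner vcpu scheduled?" check is dropped, and MY_SPIN_RETRY is
 * an arbitrary stand-in for spin_retry.
 */
#include <stdatomic.h>
#include <sched.h>

#define MY_SPIN_RETRY 1000

struct my_spinlock {
	atomic_uint lock;	/* 0 = free, otherwise the owner's id */
};

static void my_spin_lock_wait(struct my_spinlock *lp, unsigned int cpu)
{
	unsigned int owner, expected;
	int count;

	while (1) {
		owner = atomic_load_explicit(&lp->lock, memory_order_relaxed);
		/* Try to get the lock if it is free. */
		if (!owner) {
			expected = 0;
			if (atomic_compare_exchange_strong(&lp->lock,
							   &expected, cpu))
				return;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = MY_SPIN_RETRY;
		do {
			owner = atomic_load_explicit(&lp->lock,
						     memory_order_relaxed);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/* Lock still held after spinning: give the owner a chance. */
		sched_yield();
	}
}

As in the refactored kernel loop, a failed compare-and-swap simply restarts the outer loop rather than falling through, which keeps the "try to acquire", "spin on the value", and "yield" steps as three separate, readable stages.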
