@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
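The restructured loop separates the wait strategies that the old code interleaved: attempt the compare-and-swap only while the lock is observed free, yield directly to a preempted owner, spin read-only on the lock value for a bounded budget, and fall back to a hypervisor yield. Below is a minimal userspace sketch of that control flow, not the kernel code itself: vcpu_is_running() and yield_to() are hypothetical stand-ins for smp_vcpu_scheduled() and smp_yield_cpu(), GCC's __sync_bool_compare_and_swap() replaces _raw_compare_and_swap(), a volatile read approximates ACCESS_ONCE(), and the s390-specific ~owner encoding and MACHINE_IS_LPAR check are dropped.

#include <sched.h>

#define SPIN_RETRY 1000

struct spinlock { volatile unsigned int lock; };

/* Hypothetical stand-ins for smp_vcpu_scheduled()/smp_yield_cpu(). */
static int vcpu_is_running(unsigned int owner) { (void)owner; return 1; }
static void yield_to(unsigned int owner) { (void)owner; sched_yield(); }

void spin_lock_wait(struct spinlock *lp, unsigned int cpu)
{
	unsigned int owner;
	int count;

	while (1) {
		owner = lp->lock;	/* volatile read, akin to ACCESS_ONCE() */
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__sync_bool_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* If the owner's virtual CPU is preempted, yield to it. */
		if (!vcpu_is_running(owner)) {
			yield_to(owner);
			continue;
		}
		/* Owner is running: spin on the value for a bounded budget. */
		count = SPIN_RETRY;
		do {
			owner = lp->lock;
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/* Budget exhausted and still owned: give up the CPU. */
		yield_to(owner);
	}
}

Spinning on a plain read instead of retrying the compare-and-swap keeps the cache line shared between waiters until the lock is actually observed free, which is the usual test-and-test-and-set rationale.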
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
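Continuing the sketch above (same struct spinlock, SPIN_RETRY, vcpu_is_running() and yield_to() stand-ins), the _flags variant differs only in the acquisition path: it spins with interrupts enabled and disables them just before the compare-and-swap, so the lock is never taken with interrupts on. irq_disable() and irq_restore() are hypothetical stubs for local_irq_disable() and local_irq_restore(), and the continue after the failed compare-and-swap is a simplification of the patch, which falls through into the owner checks instead.

/* Hypothetical stubs for local_irq_disable()/local_irq_restore(). */
static void irq_disable(void) { }
static void irq_restore(unsigned long flags) { (void)flags; }

void spin_lock_wait_irqsave(struct spinlock *lp, unsigned int cpu,
			    unsigned long flags)
{
	unsigned int owner;
	int count;

	irq_restore(flags);		/* spin with interrupts enabled */
	while (1) {
		owner = lp->lock;
		/* Try to get the lock if it is free. */
		if (!owner) {
			irq_disable();	/* acquire with interrupts off */
			if (__sync_bool_compare_and_swap(&lp->lock, 0, cpu))
				return;
			irq_restore(flags);
			continue;	/* simplification, see lead-in */
		}
		/* Preempted owner, bounded spin, final yield: as above. */
		if (!vcpu_is_running(owner)) {
			yield_to(owner);
			continue;
		}
		count = SPIN_RETRY;
		do {
			owner = lp->lock;
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		yield_to(owner);
	}
}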