
Commit 71966f3

Ingo Molnar authored and committed
Merge branch 'locking/core' into x86/core, to prepare for dependent patch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents 34e7724 + 92ae183 commit 71966f3

60 files changed: +1397, -96 lines changed

Note: this is a large commit; some of the changed files are hidden by default and are not shown below.


Documentation/memory-barriers.txt

Lines changed: 3 additions & 3 deletions
@@ -1662,7 +1662,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
      This assigns the value to the variable and then inserts a full memory
      barrier after it, depending on the function.  It isn't guaranteed to
@@ -1975,7 +1975,7 @@ after it has altered the task state:
         CPU 1
         ===============================
         set_current_state();
-          set_mb();
+          smp_store_mb();
             STORE current->state
             <general barrier>
         LOAD event_indicated
@@ -2016,7 +2016,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
         CPU 1                           CPU 2
         =============================== ===============================
         set_current_state();            STORE event_indicated
-          set_mb();                     wake_up();
+          smp_store_mb();               wake_up();
             STORE current->state        <write barrier>
             <general barrier>           STORE current->state
         LOAD event_indicated
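
The sequence the updated documentation describes is the classic sleeper/waker pattern. Below is a minimal sketch of that pattern, assuming the generic smp_store_mb() shape used throughout this series (WRITE_ONCE() followed by smp_mb()); the names event_indicated and sleeper_task are illustrative, not taken from this commit.

/*
 * Sketch only: the sleeper publishes its state with a full barrier (via
 * set_current_state() -> smp_store_mb()) before testing the condition,
 * so it cannot miss a wakeup issued by the other CPU.
 */

/* CPU 1: sleeper */
for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE); /* STORE state + full barrier */
        if (READ_ONCE(event_indicated))          /* LOAD condition after the barrier */
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);

/* CPU 2: waker */
WRITE_ONCE(event_indicated, 1);                  /* STORE the event */
wake_up_process(sleeper_task);                   /* implies a barrier before the state STORE */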

arch/alpha/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -66,6 +66,4 @@
 #undef __ASM__MB
 #undef ____cmpxchg
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #endif /* _ALPHA_CMPXCHG_H */

arch/arm/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()

arch/arm64/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop() asm volatile("nop");
 
 #define smp_mb__before_atomic() smp_mb()

arch/avr32/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
    if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                       unsigned long new, int size)
 {

arch/hexagon/include/asm/cmpxchg.h

Lines changed: 0 additions & 1 deletion
@@ -64,7 +64,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * looks just like atomic_cmpxchg on our arch currently with a bunch of
  * variable casting.
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 #define cmpxchg(ptr, old, new) \
 ({ \

arch/ia64/include/asm/barrier.h

Lines changed: 1 addition & 6 deletions
@@ -77,12 +77,7 @@ do { \
         ___p1; \
 })
 
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure

arch/ia64/include/uapi/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -61,8 +61,6 @@ extern void ia64_xchg_called_with_bad_pointer(void);
  * indicated by comparing RETURN with OLD.
  */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg().

arch/m32r/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -107,8 +107,6 @@ __xchg_local(unsigned long x, volatile void *ptr, int size)
         ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
                         sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
 {

arch/m68k/include/asm/cmpxchg.h

Lines changed: 0 additions & 1 deletion
@@ -90,7 +90,6 @@ extern unsigned long __invalid_cmpxchg_size(volatile void *,
  * indicated by comparing RETURN with OLD.
  */
 #ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG 1
 
 static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
                                       unsigned long new, int size)

arch/metag/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
 
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v) \
 do { \

arch/metag/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -51,8 +51,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         return old;
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) _o_ = (o); \

arch/mips/include/asm/barrier.h

Lines changed: 2 additions & 2 deletions
@@ -112,8 +112,8 @@
 #define __WEAK_LLSC_MB " \n"
 #endif
 
-#define set_mb(var, value) \
-        do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) \
+        do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

arch/mips/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -138,8 +138,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
         __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
 })
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define __cmpxchg_asm(ld, st, m, old, new) \
 ({ \
         __typeof(*(m)) __ret; \

arch/parisc/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -46,8 +46,6 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 #define xchg(ptr, x) \
         ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);

arch/powerpc/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB LWSYNC

arch/powerpc/include/asm/cmpxchg.h

Lines changed: 0 additions & 1 deletion
@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  * Compare and exchange - if *p == old, set it to new,
  * and return the old value of *p.
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)

arch/s390/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #define smp_store_release(p, v) \
 do { \

arch/s390/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -32,8 +32,6 @@
         __old; \
 })
 
-#define __HAVE_ARCH_CMPXCHG
-
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
 ({ \
         register __typeof__(*(p1)) __old1 asm("2") = (o1); \

arch/score/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -42,8 +42,6 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
                 (unsigned long)(o), \
                 (unsigned long)(n)))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #include <asm-generic/cmpxchg-local.h>
 
 #endif /* _ASM_SCORE_CMPXCHG_H */

arch/sh/include/asm/barrier.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #include <asm-generic/barrier.h>

arch/sh/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -46,8 +46,6 @@ extern void __xchg_called_with_bad_pointer(void);
  * if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
                 unsigned long new, int size)
 {

arch/sparc/include/asm/barrier_64.h

Lines changed: 2 additions & 2 deletions
@@ -40,8 +40,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define dma_rmb() rmb()
 #define dma_wmb() wmb()
 
-#define set_mb(__var, __value) \
-        do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+#define smp_store_mb(__var, __value) \
+        do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()

arch/sparc/include/asm/cmpxchg_32.h

Lines changed: 0 additions & 1 deletion
@@ -34,7 +34,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
  *
  * Cribbed from <asm-parisc/atomic.h>
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);

arch/sparc/include/asm/cmpxchg_64.h

Lines changed: 0 additions & 2 deletions
@@ -65,8 +65,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {

arch/tile/include/asm/atomic_64.h

Lines changed: 0 additions & 3 deletions
@@ -105,9 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-/* Define this to indicate that cmpxchg is an efficient operation. */
-#define __HAVE_ARCH_CMPXCHG
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_64_H */

arch/x86/Kconfig

Lines changed: 3 additions & 2 deletions
@@ -127,7 +127,8 @@ config X86
         select MODULES_USE_ELF_RELA if X86_64
         select CLONE_BACKWARDS if X86_32
         select ARCH_USE_BUILTIN_BSWAP
-        select ARCH_USE_QUEUE_RWLOCK
+        select ARCH_USE_QUEUED_SPINLOCKS
+        select ARCH_USE_QUEUED_RWLOCKS
         select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
         select OLD_SIGACTION if X86_32
         select COMPAT_OLD_SIGACTION if IA32_EMULATION
@@ -666,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
         bool "Paravirtualization layer for spinlocks"
         depends on PARAVIRT && SMP
-        select UNINLINE_SPIN_UNLOCK
+        select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
         ---help---
           Paravirtualized spinlocks allow a pvops backend to replace the
           spinlock implementation with something virtualization-friendly

arch/x86/include/asm/barrier.h

Lines changed: 2 additions & 2 deletions
@@ -35,12 +35,12 @@
 #define smp_mb() mb()
 #define smp_rmb() dma_rmb()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
 #define read_barrier_depends() do { } while (0)

arch/x86/include/asm/cmpxchg.h

Lines changed: 0 additions & 2 deletions
@@ -4,8 +4,6 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * Non-existant functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().

arch/x86/include/asm/paravirt.h

Lines changed: 28 additions & 1 deletion
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+                                                        u32 val)
+{
+        PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+        PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+        PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+        PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
                                                         __ticket_t ticket)
 {
@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
         PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
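
The four wrappers added above are thin PVOP trampolines; the interesting part is the wait/kick protocol they expose for the paravirt qspinlock slow path, which lives in files not shown in this truncated view. Below is a hedged sketch of that protocol; the names vcpu_state and waiter_cpu are placeholders, not identifiers from this commit.

/*
 * Sketch, not the in-tree slow path: a contended virtual CPU parks itself
 * instead of spinning, and the lock holder wakes it at unlock time.
 */
static void example_park_waiter(u8 *vcpu_state)
{
        WRITE_ONCE(*vcpu_state, 1);     /* advertise "halted" */
        pv_wait(vcpu_state, 1);         /* block while *vcpu_state is still 1 */
}

static void example_unpark_waiter(u8 *vcpu_state, int waiter_cpu)
{
        WRITE_ONCE(*vcpu_state, 0);     /* clear the halted flag */
        pv_kick(waiter_cpu);            /* wake the parked vCPU */
}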

arch/x86/include/asm/paravirt_types.h

Lines changed: 10 additions & 0 deletions
@@ -334,9 +334,19 @@ struct arch_spinlock;
 typedef u16 __ticket_t;
 #endif
 
+struct qspinlock;
+
 struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCKS
+        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+        struct paravirt_callee_save queued_spin_unlock;
+
+        void (*wait)(u8 *ptr, u8 val);
+        void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCKS */
         struct paravirt_callee_save lock_spinning;
         void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient
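
As the PARAVIRT_SPINLOCKS help text above puts it, a pvops backend supplies these hooks; later patches in this series (hidden in this view) do so for guest kernels. The sketch below shows the general shape only: my_hv_wait, my_hv_kick and my_hv_init_spinlocks are placeholder names, and __pv_queued_spin_lock_slowpath / __pv_queued_spin_unlock are assumed to come from the generic paravirt qspinlock code added elsewhere in the series.

/* Sketch of a guest backend wiring up the new ops (names are placeholders). */
#ifdef CONFIG_QUEUED_SPINLOCKS
void __init my_hv_init_spinlocks(void)
{
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = my_hv_wait;  /* halt this vCPU until kicked */
        pv_lock_ops.kick = my_hv_kick;  /* wake the vCPU given by 'cpu' */
}
#endif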
