@@ -69,13 +69,8 @@ inline size_t AtomicNotifier::num_waiters() const noexcept {
 
 inline void AtomicNotifier::notify_one() noexcept {
   std::atomic_thread_fence(std::memory_order_seq_cst);
-  //if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) {
-  //  _state.fetch_add(EPOCH_INC, std::memory_order_relaxed);
-  //  _state.notify_one();
-  //}
-
   for(uint64_t state = _state.load(std::memory_order_acquire); state & WAITER_MASK;) {
-    if(_state.compare_exchange_weak(state, state + EPOCH_INC, std::memory_order_acquire)) {
+    if(_state.compare_exchange_weak(state, state + EPOCH_INC, std::memory_order_acq_rel)) {
       _state.notify_one();
       break;
     }
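The hunk deletes the dead, commented-out load-then-fetch_add variant and strengthens the live CAS loop. For context, a minimal sketch of the waiter-side registration that the seq_cst fence presumably pairs with; the constant and the free-function signature are illustrative, not this codebase's actual prepare_wait:

#include <atomic>
#include <cstdint>

// Hypothetical stand-in: the waiter count is assumed to occupy the low
// bits of the packed state word, as the name WAITER_MASK suggests.
constexpr uint64_t WAITER_INC = 1;

uint64_t prepare_wait_sketch(std::atomic<uint64_t>& state) noexcept {
  // Registration is a seq_cst RMW: combined with the notifier's seq_cst
  // fence, at least one side must observe the other, so a wakeup cannot
  // be lost between "check the predicate" and "go to sleep".
  return state.fetch_add(WAITER_INC, std::memory_order_seq_cst);
}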
@@ -84,12 +79,8 @@ inline void AtomicNotifier::notify_one() noexcept {
 
 inline void AtomicNotifier::notify_all() noexcept {
   std::atomic_thread_fence(std::memory_order_seq_cst);
-  //if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) {
-  //  _state.fetch_add(EPOCH_INC, std::memory_order_relaxed);
-  //  _state.notify_all();
-  //}
   for(uint64_t state = _state.load(std::memory_order_acquire); state & WAITER_MASK;) {
-    if(_state.compare_exchange_weak(state, state + EPOCH_INC, std::memory_order_acquire)) {
+    if(_state.compare_exchange_weak(state, state + EPOCH_INC, std::memory_order_acq_rel)) {
       _state.notify_all();
       break;
     }
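notify_all receives the same two fixes as notify_one. The ordering change is the substantive one: an acquire RMW gives its store no release semantics under the C++ memory model, so the old epoch bump was not guaranteed to synchronize with a waiter's acquire reload of _state. A sketch of the arguably clearer two-order spelling of the same CAS, with illustrative stand-ins for the packed-state constants:

#include <atomic>
#include <cstdint>

constexpr uint64_t WAITER_MASK = 0xffffffffull;  // assumed: low 32 bits count waiters
constexpr uint64_t EPOCH_INC   = 1ull << 32;     // assumed: high 32 bits hold the epoch
std::atomic<uint64_t> _state{0};

void notify_all_sketch() noexcept {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  for(uint64_t state = _state.load(std::memory_order_acquire); state & WAITER_MASK;) {
    // Success needs acq_rel: the release half publishes the epoch bump to
    // the woken waiter. Failure needs only acquire, since a failed CAS
    // just refreshes `state` for the retry.
    if(_state.compare_exchange_weak(state, state + EPOCH_INC,
                                    std::memory_order_acq_rel,
                                    std::memory_order_acquire)) {
      _state.notify_all();
      break;
    }
  }
}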
@@ -123,9 +114,6 @@ inline void AtomicNotifier::commit_wait(Waiter* waiter) noexcept {
     _state.wait(prev, std::memory_order_acquire);
     prev = _state.load(std::memory_order_acquire);
   }
-  // memory_order_relaxed would suffice for correctness, but the faster
-  // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
-  // (and thus system calls)
   _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst);
 }
 
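The deleted comment's rationale still holds for the unchanged seq_cst decrement: the sooner the drop in the waiter count becomes globally visible, the sooner notifiers stop issuing wakeup syscalls for a waiter that has already left. A minimal sketch of the wait loop these context lines belong to, assuming the epoch occupies the high 32 bits of _state and omitting the real Waiter bookkeeping:

#include <atomic>
#include <cstdint>

constexpr uint64_t WAITER_INC = 1;  // assumed: low bits count waiters
std::atomic<uint64_t> _state{0};

void commit_wait_sketch(uint64_t epoch) noexcept {
  uint64_t prev = _state.load(std::memory_order_acquire);
  while((prev >> 32) == epoch) {
    // atomic::wait blocks only while _state still equals prev; any change
    // (even another waiter registering) wakes us, and the loop rechecks
    // whether the epoch actually advanced.
    _state.wait(prev, std::memory_order_acquire);
    prev = _state.load(std::memory_order_acquire);
  }
  // Deregister. Keeping this seq_cst makes the count reach zero visibly
  // fast, which is what limits spurious notify-side system calls.
  _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst);
}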