Skip to content

Commit bd5132d

Browse files
Introduce atomic read/write functions with full barrier semantics.
Writing correct code using atomic variables is often difficult due to the memory barrier semantics (or lack thereof) of the underlying operations. This commit introduces atomic read/write functions with full barrier semantics to ease this cognitive load. For example, some spinlocks protect a single value, and these new functions make it easy to convert the value to an atomic variable (thus eliminating the need for the spinlock) without modifying the barrier semantics previously provided by the spinlock. Since these functions may be less performant than the other atomic reads and writes, they are not suitable for every use-case. However, using a single atomic operation with full barrier semantics may be more performant in cases where a separate explicit barrier would otherwise be required. The base implementations for these new functions are atomic exchanges (for writes) and atomic fetch/adds with 0 (for reads). These implementations can be overwritten with better architecture-specific versions as they are discovered. This commit leaves converting existing code to use these new functions as a future exercise. Reviewed-by: Andres Freund, Yong Li, Jeff Davis Discussion: https://postgr.es/m/20231110205128.GB1315705%40nathanxps13
1 parent 5f2e179 commit bd5132d

File tree

2 files changed

+94
-0
lines changed

2 files changed

+94
-0
lines changed

src/include/port/atomics.h

+58
Original file line numberDiff line numberDiff line change
@@ -237,6 +237,26 @@ pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
237237
return pg_atomic_read_u32_impl(ptr);
238238
}
239239

240+
/*
 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
 *
 * This read is guaranteed to return the current value, provided that the value
 * is only ever updated via operations with barrier semantics, such as
 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
 * While this may be less performant than pg_atomic_read_u32(), it may be
 * easier to reason about correctness with this function in less performance-
 * sensitive code.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
{
	/* the u32 atomics API requires 4-byte alignment of the variable */
	AssertPointerAlignment(ptr, 4);

	return pg_atomic_read_membarrier_u32_impl(ptr);
}
259+
240260
/*
241261
* pg_atomic_write_u32 - write to atomic variable.
242262
*
@@ -274,6 +294,26 @@ pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
274294
pg_atomic_unlocked_write_u32_impl(ptr, val);
275295
}
276296

297+
/*
 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
 *
 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with both pg_atomic_compare_exchange_u32() and
 * pg_atomic_read_membarrier_u32(). While this may be less performant than
 * pg_atomic_write_u32(), it may be easier to reason about correctness with
 * this function in less performance-sensitive code.
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	/* the u32 atomics API requires 4-byte alignment of the variable */
	AssertPointerAlignment(ptr, 4);

	pg_atomic_write_membarrier_u32_impl(ptr, val);
}
316+
277317
/*
278318
* pg_atomic_exchange_u32 - exchange newval with current value
279319
*
@@ -427,6 +467,15 @@ pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
427467
return pg_atomic_read_u64_impl(ptr);
428468
}
429469

470+
/*
 * pg_atomic_read_membarrier_u64 - read with barrier semantics.
 *
 * 64-bit analog of pg_atomic_read_membarrier_u32(): guaranteed to return the
 * current value, provided the variable is only ever updated via operations
 * with barrier semantics.  Full barrier semantics.
 */
static inline uint64
pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
{
	/*
	 * Skip the alignment check when 64-bit atomics are simulated; presumably
	 * proper 8-byte alignment cannot be guaranteed there.
	 */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_read_membarrier_u64_impl(ptr);
}
478+
430479
static inline void
431480
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
432481
{
@@ -436,6 +485,15 @@ pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
436485
pg_atomic_write_u64_impl(ptr, val);
437486
}
438487

488+
/*
 * pg_atomic_write_membarrier_u64 - write with barrier semantics.
 *
 * 64-bit analog of pg_atomic_write_membarrier_u32(): the write succeeds as a
 * whole (no reader can observe a partial write) and interacts correctly with
 * pg_atomic_compare_exchange_u64() and pg_atomic_read_membarrier_u64().
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * Skip the alignment check when 64-bit atomics are simulated; presumably
	 * proper 8-byte alignment cannot be guaranteed there.
	 */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_write_membarrier_u64_impl(ptr, val);
}
496+
439497
static inline uint64
440498
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
441499
{

src/include/port/atomics/generic.h

+36
Original file line numberDiff line numberDiff line change
@@ -243,6 +243,24 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
243243
}
244244
#endif
245245

246+
#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
247+
#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U32
248+
static inline uint32
249+
pg_atomic_read_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr)
250+
{
251+
return pg_atomic_fetch_add_u32_impl(ptr, 0);
252+
}
253+
#endif
254+
255+
#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)
256+
#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32
257+
static inline void
258+
pg_atomic_write_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
259+
{
260+
(void) pg_atomic_exchange_u32_impl(ptr, val);
261+
}
262+
#endif
263+
246264
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
247265
#define PG_HAVE_ATOMIC_EXCHANGE_U64
248266
static inline uint64
@@ -399,3 +417,21 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
399417
return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
400418
}
401419
#endif
420+
421+
#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
422+
#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U64
423+
static inline uint64
424+
pg_atomic_read_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr)
425+
{
426+
return pg_atomic_fetch_add_u64_impl(ptr, 0);
427+
}
428+
#endif
429+
430+
#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_EXCHANGE_U64)
431+
#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64
432+
static inline void
433+
pg_atomic_write_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
434+
{
435+
(void) pg_atomic_exchange_u64_impl(ptr, val);
436+
}
437+
#endif

0 commit comments

Comments
 (0)