Skip to content

Commit 230fa25

Browse files
committed
kernel: Provide READ_ONCE and ASSIGN_ONCE
ACCESS_ONCE does not work reliably on non-scalar types. For example, gcc 4.6 and 4.7 might remove the volatile tag for such accesses during the SRA (scalar replacement of aggregates) step (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145). Let's provide READ_ONCE/ASSIGN_ONCE that will do all accesses via scalar types, as suggested by Linus Torvalds. Accesses larger than the machine's word size cannot be guaranteed to be atomic. These macros will use memcpy and emit a build warning. Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
1 parent 1365039 commit 230fa25

File tree

1 file changed

+74
-0
lines changed

1 file changed

+74
-0
lines changed

include/linux/compiler.h

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
186186
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
187187
#endif
188188

189+
#include <uapi/linux/types.h>
190+
191+
/*
 * Build-time tripwire called from the default (oversized) cases of
 * __read_once_size() and __assign_once_size() below.  When the compiler
 * provides __compiletime_warning, the DECLARATION carries the warning
 * attribute, so any call to this function that is actually emitted —
 * i.e. an access wider than the machine word, which cannot be atomic —
 * produces a warning at build time.  The attribute must be attached to
 * a declaration that precedes the definition; hence the two-part shape.
 */
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;

/*
 * Empty definition: the function exists only to trigger the warning
 * above and costs nothing at run time.
 */
static __always_inline void data_access_exceeds_word_size(void)
{
}
200+
201+
static __always_inline void __read_once_size(volatile void *p, void *res, int size)
202+
{
203+
switch (size) {
204+
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
205+
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
206+
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
207+
#ifdef CONFIG_64BIT
208+
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
209+
#endif
210+
default:
211+
barrier();
212+
__builtin_memcpy((void *)res, (const void *)p, size);
213+
data_access_exceeds_word_size();
214+
barrier();
215+
}
216+
}
217+
218+
static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
219+
{
220+
switch (size) {
221+
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
222+
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
223+
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
224+
#ifdef CONFIG_64BIT
225+
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
226+
#endif
227+
default:
228+
barrier();
229+
__builtin_memcpy((void *)p, (const void *)res, size);
230+
data_access_exceeds_word_size();
231+
barrier();
232+
}
233+
}
234+
235+
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

/*
 * The result is read into a union rather than a plain typeof(x) local:
 * with a const-qualified x, typeof(x) __val would itself be const and
 * writing into it through __read_once_size()'s void *res would be
 * invalid.  The union's char buffer gives a writable destination of the
 * right size while __u.__val carries x's exact type back out of the
 * statement expression.  x is parenthesized before its address is
 * taken, standard macro hygiene.
 */
#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u;			\
	   __read_once_size(&(x), __u.__c, sizeof(x));			\
	   __u.__val; })
259+
260+
/*
 * Store (val) into x exactly once via __assign_once_size(), and yield
 * the stored value so assignments can be chained.  Both macro
 * arguments are parenthesized at expansion — (val) and &(x) — so that
 * argument expressions such as casts or *p dereferences bind as the
 * caller wrote them rather than reassociating with the surrounding
 * expansion.
 */
#define ASSIGN_ONCE(val, x) \
	({ typeof(x) __val = (val);					\
	   __assign_once_size(&(x), &__val, sizeof(__val));		\
	   __val; })
262+
189263
#endif /* __KERNEL__ */
190264

191265
#endif /* __ASSEMBLY__ */

0 commit comments

Comments
 (0)