Skip to content

Commit 3382290

Browse files
wildea01 authored and Ingo Molnar committed
locking/barriers: Convert users of lockless_dereference() to READ_ONCE()
[ Note, this is a Git cherry-pick of the following commit: 506458e ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()") ... for easier x86 PTI code testing and back-porting. ] READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it can be used instead of lockless_dereference() without any change in semantics. Signed-off-by: Will Deacon <will.deacon@arm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent c2bc660 commit 3382290

File tree

13 files changed

+27
-27
lines changed

13 files changed

+27
-27
lines changed

arch/x86/events/core.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
23712371
struct ldt_struct *ldt;
23722372

23732373
/* IRQs are off, so this synchronizes with smp_store_release */
2374-
ldt = lockless_dereference(current->active_mm->context.ldt);
2374+
ldt = READ_ONCE(current->active_mm->context.ldt);
23752375
if (!ldt || idx >= ldt->nr_entries)
23762376
return 0;
23772377

arch/x86/include/asm/mmu_context.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
7373
#ifdef CONFIG_MODIFY_LDT_SYSCALL
7474
struct ldt_struct *ldt;
7575

76-
/* lockless_dereference synchronizes with smp_store_release */
77-
ldt = lockless_dereference(mm->context.ldt);
76+
/* READ_ONCE synchronizes with smp_store_release */
77+
ldt = READ_ONCE(mm->context.ldt);
7878

7979
/*
8080
* Any change to mm->context.ldt is followed by an IPI to all

arch/x86/kernel/ldt.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -103,7 +103,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
103103
static void install_ldt(struct mm_struct *current_mm,
104104
struct ldt_struct *ldt)
105105
{
106-
/* Synchronizes with lockless_dereference in load_mm_ldt. */
106+
/* Synchronizes with READ_ONCE in load_mm_ldt. */
107107
smp_store_release(&current_mm->context.ldt, ldt);
108108

109109
/* Activate the LDT for all CPUs using current_mm. */

drivers/md/dm-mpath.c

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
366366

367367
pgpath = path_to_pgpath(path);
368368

369-
if (unlikely(lockless_dereference(m->current_pg) != pg)) {
369+
if (unlikely(READ_ONCE(m->current_pg) != pg)) {
370370
/* Only update current_pgpath if pg changed */
371371
spin_lock_irqsave(&m->lock, flags);
372372
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
390390
}
391391

392392
/* Were we instructed to switch PG? */
393-
if (lockless_dereference(m->next_pg)) {
393+
if (READ_ONCE(m->next_pg)) {
394394
spin_lock_irqsave(&m->lock, flags);
395395
pg = m->next_pg;
396396
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
406406

407407
/* Don't change PG until it has no remaining paths */
408408
check_current_pg:
409-
pg = lockless_dereference(m->current_pg);
409+
pg = READ_ONCE(m->current_pg);
410410
if (pg) {
411411
pgpath = choose_path_in_pg(m, pg, nr_bytes);
412412
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
473473
struct request *clone;
474474

475475
/* Do we need to select a new pgpath? */
476-
pgpath = lockless_dereference(m->current_pgpath);
476+
pgpath = READ_ONCE(m->current_pgpath);
477477
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
478478
pgpath = choose_pgpath(m, nr_bytes);
479479

@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
535535
bool queue_io;
536536

537537
/* Do we need to select a new pgpath? */
538-
pgpath = lockless_dereference(m->current_pgpath);
538+
pgpath = READ_ONCE(m->current_pgpath);
539539
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
540540
if (!pgpath || !queue_io)
541541
pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
18041804
struct pgpath *current_pgpath;
18051805
int r;
18061806

1807-
current_pgpath = lockless_dereference(m->current_pgpath);
1807+
current_pgpath = READ_ONCE(m->current_pgpath);
18081808
if (!current_pgpath)
18091809
current_pgpath = choose_pgpath(m, 0);
18101810

@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
18261826
}
18271827

18281828
if (r == -ENOTCONN) {
1829-
if (!lockless_dereference(m->current_pg)) {
1829+
if (!READ_ONCE(m->current_pg)) {
18301830
/* Path status changed, redo selection */
18311831
(void) choose_pgpath(m, 0);
18321832
}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
18951895
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
18961896

18971897
/* Guess which priority_group will be used at next mapping time */
1898-
pg = lockless_dereference(m->current_pg);
1899-
next_pg = lockless_dereference(m->next_pg);
1900-
if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
1898+
pg = READ_ONCE(m->current_pg);
1899+
next_pg = READ_ONCE(m->next_pg);
1900+
if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
19011901
pg = next_pg;
19021902

19031903
if (!pg) {

fs/dcache.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
231231
{
232232
/*
233233
* Be careful about RCU walk racing with rename:
234-
* use 'lockless_dereference' to fetch the name pointer.
234+
* use 'READ_ONCE' to fetch the name pointer.
235235
*
236236
* NOTE! Even if a rename will mean that the length
237237
* was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
245245
* early because the data cannot match (there can
246246
* be no NUL in the ct/tcount data)
247247
*/
248-
const unsigned char *cs = lockless_dereference(dentry->d_name.name);
248+
const unsigned char *cs = READ_ONCE(dentry->d_name.name);
249249

250250
return dentry_string_cmp(cs, ct, tcount);
251251
}

fs/overlayfs/ovl_entry.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
7777

7878
static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
7979
{
80-
return lockless_dereference(oi->__upperdentry);
80+
return READ_ONCE(oi->__upperdentry);
8181
}

fs/overlayfs/readdir.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
754754
if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
755755
struct inode *inode = file_inode(file);
756756

757-
realfile = lockless_dereference(od->upperfile);
757+
realfile = READ_ONCE(od->upperfile);
758758
if (!realfile) {
759759
struct path upperpath;
760760

include/linux/rculist.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
275275
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
276276
*/
277277
#define list_entry_rcu(ptr, type, member) \
278-
container_of(lockless_dereference(ptr), type, member)
278+
container_of(READ_ONCE(ptr), type, member)
279279

280280
/*
281281
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
368368
* example is when items are added to the list, but never deleted.
369369
*/
370370
#define list_entry_lockless(ptr, type, member) \
371-
container_of((typeof(ptr))lockless_dereference(ptr), type, member)
371+
container_of((typeof(ptr))READ_ONCE(ptr), type, member)
372372

373373
/**
374374
* list_for_each_entry_lockless - iterate over rcu list of given type

include/linux/rcupdate.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
346346
#define __rcu_dereference_check(p, c, space) \
347347
({ \
348348
/* Dependency order vs. p above. */ \
349-
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
349+
typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
350350
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
351351
rcu_dereference_sparse(p, space); \
352352
((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
360360
#define rcu_dereference_raw(p) \
361361
({ \
362362
/* Dependency order vs. p above. */ \
363-
typeof(p) ________p1 = lockless_dereference(p); \
363+
typeof(p) ________p1 = READ_ONCE(p); \
364364
((typeof(*p) __force __kernel *)(________p1)); \
365365
})
366366

kernel/events/core.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -4233,7 +4233,7 @@ static void perf_remove_from_owner(struct perf_event *event)
42334233
* indeed free this event, otherwise we need to serialize on
42344234
* owner->perf_event_mutex.
42354235
*/
4236-
owner = lockless_dereference(event->owner);
4236+
owner = READ_ONCE(event->owner);
42374237
if (owner) {
42384238
/*
42394239
* Since delayed_put_task_struct() also drops the last
@@ -4330,7 +4330,7 @@ int perf_event_release_kernel(struct perf_event *event)
43304330
* Cannot change, child events are not migrated, see the
43314331
* comment with perf_event_ctx_lock_nested().
43324332
*/
4333-
ctx = lockless_dereference(child->ctx);
4333+
ctx = READ_ONCE(child->ctx);
43344334
/*
43354335
* Since child_mutex nests inside ctx::mutex, we must jump
43364336
* through hoops. We start by grabbing a reference on the ctx.

kernel/seccomp.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
190190
u32 ret = SECCOMP_RET_ALLOW;
191191
/* Make sure cross-thread synced filter points somewhere sane. */
192192
struct seccomp_filter *f =
193-
lockless_dereference(current->seccomp.filter);
193+
READ_ONCE(current->seccomp.filter);
194194

195195
/* Ensure unexpected behavior doesn't result in failing open. */
196196
if (unlikely(WARN_ON(f == NULL)))

kernel/task_work.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
6868
* we raced with task_work_run(), *pprev == NULL/exited.
6969
*/
7070
raw_spin_lock_irqsave(&task->pi_lock, flags);
71-
while ((work = lockless_dereference(*pprev))) {
71+
while ((work = READ_ONCE(*pprev))) {
7272
if (work->func != func)
7373
pprev = &work->next;
7474
else if (cmpxchg(pprev, work, work->next) == work)

mm/slab.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -259,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
259259
* memcg_caches issues a write barrier to match this (see
260260
* memcg_create_kmem_cache()).
261261
*/
262-
cachep = lockless_dereference(arr->entries[idx]);
262+
cachep = READ_ONCE(arr->entries[idx]);
263263
rcu_read_unlock();
264264

265265
return cachep;

0 commit comments

Comments (0)