Commit 686a7e3

Eric Dumazet authored and davem330 committed
inetpeer: fix race in unused_list manipulations
Several crashes in cleanup_once() were reported in recent kernels.

Commit d6cc1d6 (inetpeer: various changes) added a race in unlink_from_unused().

One way to avoid taking unused_peers.lock before doing the list_empty() test is to catch 0->1 refcnt transitions, using full barrier atomic operations variants (atomic_cmpxchg() and atomic_inc_return()) instead of previous atomic_inc() and atomic_add_unless() variants. We then call unlink_from_unused() only for the owner of the 0->1 transition.

Add a new atomic_add_unless_return() static helper.

With help from Arun Sharma.

Refs: https://bugzilla.kernel.org/show_bug.cgi?id=32772

Reported-by: Arun Sharma <asharma@fb.com>
Reported-by: Maximilian Engelhardt <maxi@daemonizer.de>
Reported-by: Yann Dupont <Yann.Dupont@univ-nantes.fr>
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent: e7a46b4
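
For reference outside the tree, here is a minimal userspace sketch of the new helper's CAS loop, using C11 atomics in place of the kernel's atomic_t API. The standalone name add_unless_return() and the tiny main() are illustrative, not part of the patch:

    /* Userspace sketch of atomic_add_unless_return(): add 'a' to *ptr
     * unless its current value is 'u', and report the post-add value
     * through *newv so the caller can spot the 0->1 transition.
     * C11 atomics stand in for the kernel's atomic_t API. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
    {
            int old = atomic_load(ptr);

            while (old != u) {
                    *newv = old + a;
                    /* On failure, compare_exchange rewrites 'old' with
                     * the value it actually found, so the loop retries
                     * from the freshly observed value. */
                    if (atomic_compare_exchange_weak(ptr, &old, *newv))
                            return true;
            }
            return false;   /* value was 'u'; in the patch, -1 marks a freed entry */
    }

    int main(void)
    {
            atomic_int refcnt = 0;  /* entry sits on the unused list */
            int newv;

            if (add_unless_return(&refcnt, 1, -1, &newv) && newv == 1)
                    printf("we own the 0->1 transition: unlink from unused list\n");
            return 0;
    }

The full-barrier CAS gives the same ordering guarantees the commit message calls out, while returning the new count, which atomic_add_unless() alone cannot do.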

File tree: 1 file changed (+27, -15 lines)

net/ipv4/inetpeer.c

@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	if (!list_empty(&p->unused)) {
-		spin_lock_bh(&unused_peers.lock);
-		list_del_init(&p->unused);
-		spin_unlock_bh(&unused_peers.lock);
-	}
+	spin_lock_bh(&unused_peers.lock);
+	list_del_init(&p->unused);
+	spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
 	u;						\
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+	int cur, old = atomic_read(ptr);
+
+	while (old != u) {
+		*newv = old + a;
+		cur = atomic_cmpxchg(ptr, old, *newv);
+		if (cur == old)
+			return true;
+		old = cur;
+	}
+	return false;
+}
+
 /*
  * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base)
+				    struct inet_peer_base *base,
+				    int *newrefcnt)
 {
 	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 		 * distinction between an unused entry (refcnt=0) and
 		 * a freed one.
 		 */
-		if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+		if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
 			u = NULL;
 		return u;
 	}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
-	int invalidated;
+	int invalidated, newrefcnt = 0;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base);
+	p = lookup_rcu(daddr, base, &newrefcnt);
 	invalidated = read_seqretry(&base->lock, sequence);
 	rcu_read_unlock();
 
 	if (p) {
-		/* The existing node has been found.
+found:		/* The existing node has been found.
 		 * Remove the entry from unused list if it was there.
 		 */
-		unlink_from_unused(p);
+		if (newrefcnt == 1)
+			unlink_from_unused(p);
 		return p;
 	}
 
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	write_seqlock_bh(&base->lock);
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		newrefcnt = atomic_inc_return(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
-		/* Remove the entry from unused list if it was there. */
-		unlink_from_unused(p);
-		return p;
+		goto found;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
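
To see why handing the 0->1 transition to a single owner closes the race: concurrent acquirers of an unused entry each observe a distinct post-increment value, so exactly one of them sees newrefcnt == 1, and only that one touches the unused list. The locked slow path can use plain atomic_inc_return() because a freed entry (refcnt == -1) is unreachable under base->lock. A small pthread harness (illustrative only, not kernel code; C11 atomic_fetch_add stands in for atomic_inc_return) demonstrates the single-owner property:

    /* Demo: with N threads atomically incrementing a counter that
     * starts at 0, exactly one observes the 0->1 transition -- the
     * property inet_getpeer() now relies on before calling
     * unlink_from_unused(). Names are made up for the harness. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 8

    static atomic_int refcnt = 0;   /* entry starts out unused */
    static atomic_int owners = 0;   /* threads that saw refcnt go 0->1 */

    static void *acquire(void *arg)
    {
            (void)arg;
            /* atomic_fetch_add returns the old value; +1 gives the
             * new count, like the kernel's atomic_inc_return(). */
            int newrefcnt = atomic_fetch_add(&refcnt, 1) + 1;

            if (newrefcnt == 1)
                    atomic_fetch_add(&owners, 1);   /* would unlink here */
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NTHREADS];

            for (int i = 0; i < NTHREADS; i++)
                    pthread_create(&tid[i], NULL, acquire, NULL);
            for (int i = 0; i < NTHREADS; i++)
                    pthread_join(tid[i], NULL);

            /* Prints owners=1 refcnt=8 on every run. */
            printf("owners=%d refcnt=%d\n",
                   atomic_load(&owners), atomic_load(&refcnt));
            return 0;
    }

Every run reports a single owner, matching the patch's invariant that only one caller performs the unused-list removal, so the list_empty() test no longer needs to race against it.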
