
Commit d6cc1d6 (1 parent: 6b10de3)

Eric Dumazet authored and davem330 committed
inetpeer: various changes
Try to reduce cache line contentions in peer management, to reduce IP
defragmentation overhead.

- peer_fake_node is marked 'const' to make sure it's not modified
  (tested with CONFIG_DEBUG_RODATA=y).

- Group variables in two structures to reduce the number of dirtied
  cache lines. One, named "peers", holds the AVL tree root, its number
  of entries, and the associated lock (a candidate for RCU conversion).

- A second one, named "unused_peers", holds the unused list and its
  lock.

- Add a !list_empty() test in unlink_from_unused() to avoid taking the
  lock when the entry is not on the unused list.

- Use atomic_dec_and_lock() in inet_putpeer() to avoid taking the lock
  in some cases.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
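The atomic_dec_and_lock() point is the main fast-path win: the unused-list lock is taken only when the reference count actually drops to zero. Here is a minimal sketch of the pattern, assuming a hypothetical refcounted object; struct obj, obj_put(), unused_list and unused_lock are illustrative names, not the exact inetpeer code:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct obj {
        atomic_t         refcnt;
        struct list_head unused;        /* linkage on the unused list */
};

static LIST_HEAD(unused_list);
static DEFINE_SPINLOCK(unused_lock);

/* Drop a reference.  atomic_dec_and_lock() decrements refcnt and
 * returns true *with unused_lock held* only when the count reaches
 * zero, so the common case (refcnt > 1) never dirties the lock's
 * cache line. */
static void obj_put(struct obj *p)
{
        local_bh_disable();     /* the list is also touched from BH context */

        if (atomic_dec_and_lock(&p->refcnt, &unused_lock)) {
                list_add_tail(&p->unused, &unused_list);
                spin_unlock(&unused_lock);
        }

        local_bh_enable();
}

The old code paid for a spin_lock_bh()/spin_unlock_bh() pair on every put; with this pattern the lock is untouched unless the object is really going idle.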
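The !list_empty() test in unlink_from_unused() is the same idea from the other direction: probe list membership before taking the lock. A sketch, reusing the hypothetical struct obj and unused_lock from above, of why the unlocked test is safe here:

/* Remove p from the unused list, if it is on it.  The list_empty()
 * test runs without the lock; since list_del_init() leaves the node
 * self-linked, a stale "non-empty" answer only costs us the lock and
 * a no-op deletion, which is still correct. */
static void obj_unlink_from_unused(struct obj *p)
{
        if (!list_empty(&p->unused)) {
                spin_lock_bh(&unused_lock);
                list_del_init(&p->unused);
                spin_unlock_bh(&unused_lock);
        }
}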

File tree

1 file changed: +56, -38 lines


net/ipv4/inetpeer.c

Lines changed: 56 additions & 38 deletions
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -70,17 +70,25 @@
 static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
-static struct inet_peer peer_fake_node = {
-        .avl_left       = &peer_fake_node,
-        .avl_right      = &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+        .avl_left       = peer_avl_empty,
+        .avl_right      = peer_avl_empty,
         .avl_height     = 0
 };
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+static struct {
+        struct inet_peer *root;
+        rwlock_t        lock;
+        int             total;
+} peers = {
+        .root           = peer_avl_empty,
+        .lock           = __RW_LOCK_UNLOCKED(peers.lock),
+        .total          = 0,
+};
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static int peer_total;
 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold __read_mostly = 65536 + 128;    /* start to throw entries more
                                          * aggressively at this stage */
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static LIST_HEAD(unused_peers);
-static DEFINE_SPINLOCK(inet_peer_unused_lock);
+static struct {
+        struct list_head        list;
+        spinlock_t              lock;
+} unused_peers = {
+        .list                   = LIST_HEAD_INIT(unused_peers.list),
+        .lock                   = __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -131,9 +144,11 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-        spin_lock_bh(&inet_peer_unused_lock);
-        list_del_init(&p->unused);
-        spin_unlock_bh(&inet_peer_unused_lock);
+        if (!list_empty(&p->unused)) {
+                spin_lock_bh(&unused_peers.lock);
+                list_del_init(&p->unused);
+                spin_unlock_bh(&unused_peers.lock);
+        }
 }
 
 /*
@@ -146,9 +161,9 @@ static void unlink_from_unused(struct inet_peer *p)
         struct inet_peer *u, **v;                       \
         if (_stack != NULL) {                           \
                 stackptr = _stack;                      \
-                *stackptr++ = &peer_root;               \
+                *stackptr++ = &peers.root;              \
         }                                               \
-        for (u = peer_root; u != peer_avl_empty; ) {    \
+        for (u = peers.root; u != peer_avl_empty; ) {   \
                 if (_daddr == u->v4daddr)               \
                         break;                          \
                 if ((__force __u32)_daddr < (__force __u32)u->v4daddr)  \
@@ -262,7 +277,7 @@ do { \
         n->avl_right = peer_avl_empty;                  \
         **--stackptr = n;                               \
         peer_avl_rebalance(stack, stackptr);            \
-} while(0)
+} while (0)
 
 /* May be called with local BH enabled. */
 static void unlink_from_pool(struct inet_peer *p)
@@ -271,7 +286,7 @@ static void unlink_from_pool(struct inet_peer *p)
 
         do_free = 0;
 
-        write_lock_bh(&peer_pool_lock);
+        write_lock_bh(&peers.lock);
         /* Check the reference counter.  It was artificially incremented by 1
          * in cleanup() function to prevent sudden disappearing.  If the
          * reference count is still 1 then the node is referenced only as `p'
@@ -303,10 +318,10 @@ static void unlink_from_pool(struct inet_peer *p)
                         delp[1] = &t->avl_left; /* was &p->avl_left */
                 }
                 peer_avl_rebalance(stack, stackptr);
-                peer_total--;
+                peers.total--;
                 do_free = 1;
         }
-        write_unlock_bh(&peer_pool_lock);
+        write_unlock_bh(&peers.lock);
 
         if (do_free)
                 kmem_cache_free(peer_cachep, p);
@@ -326,16 +341,16 @@ static int cleanup_once(unsigned long ttl)
         struct inet_peer *p = NULL;
 
         /* Remove the first entry from the list of unused nodes. */
-        spin_lock_bh(&inet_peer_unused_lock);
-        if (!list_empty(&unused_peers)) {
+        spin_lock_bh(&unused_peers.lock);
+        if (!list_empty(&unused_peers.list)) {
                 __u32 delta;
 
-                p = list_first_entry(&unused_peers, struct inet_peer, unused);
+                p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
                 delta = (__u32)jiffies - p->dtime;
 
                 if (delta < ttl) {
                         /* Do not prune fresh entries. */
-                        spin_unlock_bh(&inet_peer_unused_lock);
+                        spin_unlock_bh(&unused_peers.lock);
                         return -1;
                 }
 
@@ -345,7 +360,7 @@ static int cleanup_once(unsigned long ttl)
                  * before unlink_from_pool() call. */
                 atomic_inc(&p->refcnt);
         }
-        spin_unlock_bh(&inet_peer_unused_lock);
+        spin_unlock_bh(&unused_peers.lock);
 
         if (p == NULL)
                 /* It means that the total number of USED entries has
@@ -364,11 +379,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
         struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 
         /* Look up for the address quickly. */
-        read_lock_bh(&peer_pool_lock);
+        read_lock_bh(&peers.lock);
         p = lookup(daddr, NULL);
         if (p != peer_avl_empty)
                 atomic_inc(&p->refcnt);
-        read_unlock_bh(&peer_pool_lock);
+        read_unlock_bh(&peers.lock);
 
         if (p != peer_avl_empty) {
                 /* The existing node has been found. */
@@ -390,7 +405,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
         atomic_set(&n->ip_id_count, secure_ip_id(daddr));
         n->tcp_ts_stamp = 0;
 
-        write_lock_bh(&peer_pool_lock);
+        write_lock_bh(&peers.lock);
         /* Check if an entry has suddenly appeared. */
         p = lookup(daddr, stack);
         if (p != peer_avl_empty)
@@ -399,10 +414,10 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
         /* Link the node. */
         link_to_pool(n);
         INIT_LIST_HEAD(&n->unused);
-        peer_total++;
-        write_unlock_bh(&peer_pool_lock);
+        peers.total++;
+        write_unlock_bh(&peers.lock);
 
-        if (peer_total >= inet_peer_threshold)
+        if (peers.total >= inet_peer_threshold)
                 /* Remove one less-recently-used entry. */
                 cleanup_once(0);
 
@@ -411,7 +426,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 out_free:
         /* The appropriate node is already in the pool. */
         atomic_inc(&p->refcnt);
-        write_unlock_bh(&peer_pool_lock);
+        write_unlock_bh(&peers.lock);
         /* Remove the entry from unused list if it was there. */
         unlink_from_unused(p);
         /* Free preallocated the preallocated node. */
@@ -425,12 +440,12 @@ static void peer_check_expire(unsigned long dummy)
         unsigned long now = jiffies;
         int ttl;
 
-        if (peer_total >= inet_peer_threshold)
+        if (peers.total >= inet_peer_threshold)
                 ttl = inet_peer_minttl;
         else
                 ttl = inet_peer_maxttl
                                 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
-                                        peer_total / inet_peer_threshold * HZ;
+                                        peers.total / inet_peer_threshold * HZ;
         while (!cleanup_once(ttl)) {
                 if (jiffies != now)
                         break;
@@ -439,22 +454,25 @@ static void peer_check_expire(unsigned long dummy)
         /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
          * interval depending on the total number of entries (more entries,
          * less interval). */
-        if (peer_total >= inet_peer_threshold)
+        if (peers.total >= inet_peer_threshold)
                 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
         else
                 peer_periodic_timer.expires = jiffies
                         + inet_peer_gc_maxtime
                         - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-                                peer_total / inet_peer_threshold * HZ;
+                                peers.total / inet_peer_threshold * HZ;
         add_timer(&peer_periodic_timer);
 }
 
 void inet_putpeer(struct inet_peer *p)
 {
-        spin_lock_bh(&inet_peer_unused_lock);
-        if (atomic_dec_and_test(&p->refcnt)) {
-                list_add_tail(&p->unused, &unused_peers);
+        local_bh_disable();
+
+        if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+                list_add_tail(&p->unused, &unused_peers.list);
                 p->dtime = (__u32)jiffies;
+                spin_unlock(&unused_peers.lock);
         }
-        spin_unlock_bh(&inet_peer_unused_lock);
+
+        local_bh_enable();
 }
