@@ -70,17 +70,25 @@
 static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
-static struct inet_peer peer_fake_node = {
-	.avl_left	= &peer_fake_node,
-	.avl_right	= &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+	.avl_left	= peer_avl_empty,
+	.avl_right	= peer_avl_empty,
 	.avl_height	= 0
 };
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+static struct {
+	struct inet_peer *root;
+	rwlock_t	lock;
+	int		total;
+} peers = {
+	.root		= peer_avl_empty,
+	.lock		= __RW_LOCK_UNLOCKED(peers.lock),
+	.total		= 0,
+};
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static int peer_total;
 /* Exported for sysctl_net_ipv4. */
 int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static LIST_HEAD(unused_peers);
-static DEFINE_SPINLOCK(inet_peer_unused_lock);
+static struct {
+	struct list_head	list;
+	spinlock_t		lock;
+} unused_peers = {
+	.list		= LIST_HEAD_INIT(unused_peers.list),
+	.lock		= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -131,9 +144,11 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	list_del_init(&p->unused);
-	spin_unlock_bh(&inet_peer_unused_lock);
+	if (!list_empty(&p->unused)) {
+		spin_lock_bh(&unused_peers.lock);
+		list_del_init(&p->unused);
+		spin_unlock_bh(&unused_peers.lock);
+	}
 }
 
 /*
@@ -146,9 +161,9 @@ static void unlink_from_unused(struct inet_peer *p)
 	struct inet_peer *u, **v;				\
 	if (_stack != NULL) {					\
 		stackptr = _stack;				\
-		*stackptr++ = &peer_root;			\
+		*stackptr++ = &peers.root;			\
 	}							\
-	for (u = peer_root; u != peer_avl_empty; ) {		\
+	for (u = peers.root; u != peer_avl_empty; ) {		\
 		if (_daddr == u->v4daddr)			\
 			break;					\
 		if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
@@ -262,7 +277,7 @@ do { \
 	n->avl_right = peer_avl_empty;				\
 	**--stackptr = n;					\
 	peer_avl_rebalance(stack, stackptr);			\
-} while(0)
+} while (0)
 
 /* May be called with local BH enabled. */
 static void unlink_from_pool(struct inet_peer *p)
@@ -271,7 +286,7 @@ static void unlink_from_pool(struct inet_peer *p)
 
 	do_free = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	write_lock_bh(&peers.lock);
 	/* Check the reference counter.  It was artificially incremented by 1
 	 * in cleanup() function to prevent sudden disappearing.  If the
 	 * reference count is still 1 then the node is referenced only as `p'
@@ -303,10 +318,10 @@ static void unlink_from_pool(struct inet_peer *p)
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
 		peer_avl_rebalance(stack, stackptr);
-		peer_total--;
+		peers.total--;
 		do_free = 1;
 	}
-	write_unlock_bh(&peer_pool_lock);
+	write_unlock_bh(&peers.lock);
 
 	if (do_free)
 		kmem_cache_free(peer_cachep, p);
@@ -326,16 +341,16 @@ static int cleanup_once(unsigned long ttl)
 	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (!list_empty(&unused_peers)) {
+	spin_lock_bh(&unused_peers.lock);
+	if (!list_empty(&unused_peers.list)) {
 		__u32 delta;
 
-		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
 		delta = (__u32)jiffies - p->dtime;
 
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
-			spin_unlock_bh(&inet_peer_unused_lock);
+			spin_unlock_bh(&unused_peers.lock);
 			return -1;
 		}
 
@@ -345,7 +360,7 @@ static int cleanup_once(unsigned long ttl)
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+	spin_unlock_bh(&unused_peers.lock);
 
 	if (p == NULL)
 		/* It means that the total number of USED entries has
@@ -364,11 +379,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 
 	/* Look up for the address quickly. */
-	read_lock_bh(&peer_pool_lock);
+	read_lock_bh(&peers.lock);
 	p = lookup(daddr, NULL);
 	if (p != peer_avl_empty)
 		atomic_inc(&p->refcnt);
-	read_unlock_bh(&peer_pool_lock);
+	read_unlock_bh(&peers.lock);
 
 	if (p != peer_avl_empty) {
 		/* The existing node has been found. */
@@ -390,7 +405,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	atomic_set(&n->ip_id_count, secure_ip_id(daddr));
 	n->tcp_ts_stamp = 0;
 
-	write_lock_bh(&peer_pool_lock);
+	write_lock_bh(&peers.lock);
 	/* Check if an entry has suddenly appeared. */
 	p = lookup(daddr, stack);
 	if (p != peer_avl_empty)
@@ -399,10 +414,10 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 	/* Link the node. */
 	link_to_pool(n);
 	INIT_LIST_HEAD(&n->unused);
-	peer_total++;
-	write_unlock_bh(&peer_pool_lock);
+	peers.total++;
+	write_unlock_bh(&peers.lock);
 
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
 		cleanup_once(0);
 
@@ -411,7 +426,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 out_free:
 	/* The appropriate node is already in the pool. */
 	atomic_inc(&p->refcnt);
-	write_unlock_bh(&peer_pool_lock);
+	write_unlock_bh(&peers.lock);
 	/* Remove the entry from unused list if it was there. */
 	unlink_from_unused(p);
 	/* Free preallocated the preallocated node. */
@@ -425,12 +440,12 @@ static void peer_check_expire(unsigned long dummy)
 	unsigned long now = jiffies;
 	int ttl;
 
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peer_total / inet_peer_threshold * HZ;
+					peers.total / inet_peer_threshold * HZ;
 	while (!cleanup_once(ttl)) {
 		if (jiffies != now)
 			break;
@@ -439,22 +454,25 @@ static void peer_check_expire(unsigned long dummy)
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peer_total >= inet_peer_threshold)
+	if (peers.total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peer_total / inet_peer_threshold * HZ;
+				peers.total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
 void inet_putpeer(struct inet_peer *p)
 {
-	spin_lock_bh(&inet_peer_unused_lock);
-	if (atomic_dec_and_test(&p->refcnt)) {
-		list_add_tail(&p->unused, &unused_peers);
+	local_bh_disable();
+
+	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+		list_add_tail(&p->unused, &unused_peers.list);
 		p->dtime = (__u32)jiffies;
+		spin_unlock(&unused_peers.lock);
 	}
-	spin_unlock_bh(&inet_peer_unused_lock);
+
+	local_bh_enable();
 }
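
Note on the inet_putpeer() hunk above: the point of switching from spin_lock_bh() + atomic_dec_and_test() to atomic_dec_and_lock() is that the unused_peers.lock is now taken only when the reference count is about to reach zero; the common put path just decrements the counter. The sketch below is a minimal userspace analog of that pattern, written with C11 atomics and a pthread mutex purely for illustration -- the function name dec_and_lock and these APIs are assumptions, not the kernel's implementation.

/* Hypothetical userspace analog of the atomic_dec_and_lock() pattern:
 * decrement the counter; only when it would drop to zero, take the lock
 * and return true with the lock held.  Illustrative sketch only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: while more than one reference remains, decrement
	 * without touching the lock at all. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;	/* still referenced, lock not taken */
	}

	/* Slow path: the count may reach zero, so take the lock first
	 * and decrement under it. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;		/* hit zero, caller holds the lock */
	pthread_mutex_unlock(lock);
	return false;
}

In the diff this is wrapped in local_bh_disable()/local_bh_enable(), so the unused-list manipulation still runs with BHs off while the lock itself stays off the fast path.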