@@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
 static __read_mostly seqcount_t nf_conntrack_generation;
 static __read_mostly bool nf_conntrack_locks_all;
@@ -164,9 +167,9 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
 		       tuple->dst.protonum));
 }
 
-static u32 hash_bucket(u32 hash, const struct net *net)
+static u32 scale_hash(u32 hash)
 {
-	return reciprocal_scale(hash, net->ct.htable_size);
+	return reciprocal_scale(hash, nf_conntrack_htable_size);
 }
 
 static u32 __hash_conntrack(const struct net *net,
@@ -179,7 +182,7 @@ static u32 __hash_conntrack(const struct net *net,
 static u32 hash_conntrack(const struct net *net,
 			  const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_conntrack(net, tuple, net->ct.htable_size);
+	return scale_hash(hash_conntrack_raw(tuple, net));
 }
 
 bool
@@ -478,8 +481,8 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 begin:
 	do {
 		sequence = read_seqcount_begin(&nf_conntrack_generation);
-		bucket = hash_bucket(hash, net);
-		ct_hash = net->ct.hash;
+		bucket = scale_hash(hash);
+		ct_hash = nf_conntrack_hash;
 	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
 
 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
@@ -543,12 +546,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 				       unsigned int hash,
 				       unsigned int reply_hash)
 {
-	struct net *net = nf_ct_net(ct);
-
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			   &net->ct.hash[hash]);
+			   &nf_conntrack_hash[hash]);
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-			   &net->ct.hash[reply_hash]);
+			   &nf_conntrack_hash[reply_hash]);
 }
 
 int
@@ -573,12 +574,12 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
 	/* See if there's one in the list already, including reverse */
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
 		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				    zone, net))
 			goto out;
 
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
 		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				    zone, net))
 			goto out;
@@ -633,7 +634,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		/* reuse the hash saved before */
 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
-		hash = hash_bucket(hash, net);
+		hash = scale_hash(hash);
 		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
@@ -663,12 +664,12 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
 		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				    zone, net))
 			goto out;
 
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
 		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				    zone, net))
 			goto out;
@@ -736,7 +737,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	do {
 		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		hash = hash_conntrack(net, tuple);
-		ct_hash = net->ct.hash;
+		ct_hash = nf_conntrack_hash;
 	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
 
 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
@@ -773,16 +774,16 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 	local_bh_disable();
 restart:
 	sequence = read_seqcount_begin(&nf_conntrack_generation);
-	hash = hash_bucket(_hash, net);
-	for (; i < net->ct.htable_size; i++) {
+	hash = scale_hash(_hash);
+	for (; i < nf_conntrack_htable_size; i++) {
 		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
 		nf_conntrack_lock(lockp);
 		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 			spin_unlock(lockp);
 			goto restart;
 		}
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
-					       hnnode) {
+		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+					       hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 			if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
 			    !nf_ct_is_dying(tmp) &&
@@ -793,7 +794,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 			cnt++;
 		}
 
-		hash = (hash + 1) % net->ct.htable_size;
+		hash = (hash + 1) % nf_conntrack_htable_size;
 		spin_unlock(lockp);
 
 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
@@ -1376,12 +1377,12 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	int cpu;
 	spinlock_t *lockp;
 
-	for (; *bucket < net->ct.htable_size; (*bucket)++) {
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
 		local_bh_disable();
 		nf_conntrack_lock(lockp);
-		if (*bucket < net->ct.htable_size) {
-			hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+		if (*bucket < nf_conntrack_htable_size) {
+			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
 				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 					continue;
 				ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1478,6 +1479,8 @@ void nf_conntrack_cleanup_end(void)
 	while (untrack_refs() > 0)
 		schedule();
 
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	nf_ct_extend_unregister(&nf_ct_zone_extend);
 #endif
@@ -1528,7 +1531,6 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 	}
 
 	list_for_each_entry(net, net_exit_list, exit_list) {
-		nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 		nf_conntrack_proto_pernet_fini(net);
 		nf_conntrack_helper_pernet_fini(net);
 		nf_conntrack_ecache_pernet_fini(net);
@@ -1599,22 +1601,22 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	 * though since that required taking the locks.
 	 */
 
-	for (i = 0; i < init_net.ct.htable_size; i++) {
-		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
-			h = hlist_nulls_entry(init_net.ct.hash[i].first,
-					struct nf_conntrack_tuple_hash, hnnode);
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+					      struct nf_conntrack_tuple_hash, hnnode);
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			hlist_nulls_del_rcu(&h->hnnode);
 			bucket = __hash_conntrack(nf_ct_net(ct),
 						  &h->tuple, hashsize);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
-	old_size = init_net.ct.htable_size;
-	old_hash = init_net.ct.hash;
+	old_size = nf_conntrack_htable_size;
+	old_hash = nf_conntrack_hash;
 
-	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-	init_net.ct.hash = hash;
+	nf_conntrack_hash = hash;
+	nf_conntrack_htable_size = hashsize;
 
 	write_seqcount_end(&nf_conntrack_generation);
 	nf_conntrack_all_unlock();
@@ -1670,6 +1672,11 @@ int nf_conntrack_init_start(void)
 		 * entries. */
 		max_factor = 4;
 	}
+
+	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+	if (!nf_conntrack_hash)
+		return -ENOMEM;
+
 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
 	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
@@ -1748,6 +1755,7 @@ int nf_conntrack_init_start(void)
 err_acct:
 	nf_conntrack_expect_fini();
 err_expect:
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
 	return ret;
 }
 
@@ -1800,12 +1808,6 @@ int nf_conntrack_init_net(struct net *net)
 		goto err_cache;
 	}
 
-	net->ct.htable_size = nf_conntrack_htable_size;
-	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
-	if (!net->ct.hash) {
-		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
-		goto err_hash;
-	}
 	ret = nf_conntrack_expect_pernet_init(net);
 	if (ret < 0)
 		goto err_expect;
@@ -1837,8 +1839,6 @@ int nf_conntrack_init_net(struct net *net)
 err_acct:
 	nf_conntrack_expect_pernet_fini(net);
 err_expect:
-	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
-err_hash:
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
 	kfree(net->ct.slabname);
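
Note on the bucket mapping used by the new scale_hash() helper: reciprocal_scale(hash, size) computes ((u64)hash * size) >> 32, which maps a full 32-bit hash onto [0, size) without a division and without requiring a power-of-two table size. Below is a minimal userspace sketch of that mapping; the reciprocal_scale() body mirrors the kernel helper, while the table size and sample hash values are made up purely for illustration (in the kernel the hashes come from hash_conntrack_raw() and the size from nf_conntrack_htable_size).

#include <stdint.h>
#include <stdio.h>

/* Same multiply-shift trick as the kernel's reciprocal_scale():
 * returns a value in [0, ep_ro) without a modulo or division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

/* Stand-in for the single global table size (illustrative value). */
static uint32_t nf_conntrack_htable_size = 16384;

/* Equivalent of the patch's scale_hash(): 32-bit hash -> bucket index. */
static uint32_t scale_hash(uint32_t hash)
{
	return reciprocal_scale(hash, nf_conntrack_htable_size);
}

int main(void)
{
	/* Example hashes only; not taken from real conntrack tuples. */
	uint32_t hashes[] = { 0x00000000u, 0x7fffffffu, 0xdeadbeefu, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash %08x -> bucket %u\n", hashes[i], scale_hash(hashes[i]));
	return 0;
}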