@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
 		return NULL;
 
 	INIT_HLIST_HEAD(&newtbl->known_gates);
+	INIT_HLIST_HEAD(&newtbl->walk_head);
 	atomic_set(&newtbl->entries, 0);
 	spin_lock_init(&newtbl->gates_lock);
+	spin_lock_init(&newtbl->walk_lock);
 
 	return newtbl;
 }
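The two initializations added here pair with new fields from the companion mesh.h hunk (not shown on this page). A minimal sketch of the structures this patch assumes; the field names are taken from the code below, the exact layout is illustrative:

	struct mesh_table {
		struct hlist_head known_gates;
		spinlock_t gates_lock;
		struct rhashtable rhead;	/* hash lookup by destination */
		struct hlist_head walk_head;	/* new: list of all paths, for walking */
		spinlock_t walk_lock;		/* new: serializes walk_head writers */
		atomic_t entries;
	};

	struct mesh_path {
		/* ... existing fields ... */
		struct rhash_head rhash;	/* linkage in tbl->rhead */
		struct hlist_node walk_list;	/* new: linkage in tbl->walk_head */
	};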
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-	int i = 0, ret;
-	struct mesh_path *mpath = NULL;
-	struct rhashtable_iter iter;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return NULL;
-
-	rhashtable_walk_start(&iter);
+	int i = 0;
+	struct mesh_path *mpath;
 
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (i++ == idx)
 			break;
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
 
-	if (IS_ERR(mpath) || !mpath)
+	if (!mpath)
 		return NULL;
 
 	if (mpath_expired(mpath)) {
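A detail this loop relies on: hlist_for_each_entry_rcu() leaves the cursor NULL when the walk runs off the end of the list, so the !mpath test cleanly distinguishes "idx out of range" from a successful early break. The _rcu iteration is only valid inside an RCU read-side critical section; a hypothetical caller sketch (the helper name and the use of mpath->metric are illustrative, not part of the patch):

	static u32 mesh_path_metric_by_idx(struct mesh_table *tbl, int idx)
	{
		struct mesh_path *mpath;
		u32 metric = 0;

		rcu_read_lock();
		mpath = __mesh_path_lookup_by_idx(tbl, idx);
		if (mpath)
			metric = mpath->metric;	/* must not escape the RCU section */
		rcu_read_unlock();

		return metric;
	}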
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 		return ERR_PTR(-ENOMEM);
 
 	tbl = sdata->u.mesh.mesh_paths;
+	spin_lock_bh(&tbl->walk_lock);
 	do {
 		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 						    &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 			mpath = rhashtable_lookup_fast(&tbl->rhead,
 						       dst,
 						       mesh_rht_params);
-
+		else if (!ret)
+			hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
 	} while (unlikely(ret == -EEXIST && !mpath));
+	spin_unlock_bh(&tbl->walk_lock);
 
-	if (ret && ret != -EEXIST)
-		return ERR_PTR(ret);
-
-	/* At this point either new_mpath was added, or we found a
-	 * matching entry already in the table; in the latter case
-	 * free the unnecessary new entry.
-	 */
-	if (ret == -EEXIST) {
+	if (ret) {
 		kfree(new_mpath);
+
+		if (ret != -EEXIST)
+			return ERR_PTR(ret);
+
 		new_mpath = mpath;
 	}
+
 	sdata->u.mesh.mesh_paths_generation++;
 	return new_mpath;
 }
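Both structures are updated inside one walk_lock critical section, so an entry is either in both the rhashtable and the walk list or in neither; rhashtable_lookup_insert_fast() supplies the duplicate detection. Distilled into a standalone sketch (hypothetical helper; it uses the _rcu list publisher like the mpp variant further down, whereas the hunk above uses plain hlist_add_head() under the same lock):

	static int mesh_table_insert(struct mesh_table *tbl, struct mesh_path *mpath)
	{
		int ret;

		spin_lock_bh(&tbl->walk_lock);
		ret = rhashtable_lookup_insert_fast(&tbl->rhead, &mpath->rhash,
						    mesh_rht_params);
		if (!ret)	/* inserted: mirror it on the walk list */
			hlist_add_head_rcu(&mpath->walk_list, &tbl->walk_head);
		spin_unlock_bh(&tbl->walk_lock);

		return ret;	/* -EEXIST if the key was already present */
	}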
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	tbl = sdata->u.mesh.mpp_paths;
+
+	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 					    &new_mpath->rhash,
 					    mesh_rht_params);
+	if (!ret)
+		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+	spin_unlock_bh(&tbl->walk_lock);
+
+	if (ret)
+		kfree(new_mpath);
 
 	sdata->u.mesh.mpp_paths_generation++;
 	return ret;
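Besides adding the walk-list bookkeeping, this hunk frees new_mpath when the insert fails, which the old code never did. The publisher here is hlist_add_head_rcu(): it contains the write barrier that pairs with hlist_for_each_entry_rcu() readers, so a concurrent walker can never observe a partially initialized entry. The reader side of that pairing, as a hypothetical standalone walker:

	static int mesh_path_count(struct mesh_table *tbl)
	{
		struct mesh_path *mpath;
		int n = 0;

		rcu_read_lock();	/* pairs with hlist_add_head_rcu() above */
		hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list)
			n++;
		rcu_read_unlock();

		return n;
	}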
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
 
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
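Taken together with the flush functions below, the locking contract this patch introduces can be summarized as follows (inferred from the call sites in this file):

	/*
	 * Read-only walk (may run in parallel with writers):
	 *	rcu_read_lock();
	 *	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list)
	 *		...
	 *	rcu_read_unlock();
	 *
	 * Mutating walk (deletes entries while iterating):
	 *	spin_lock_bh(&tbl->walk_lock);
	 *	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list)
	 *		__mesh_path_del(tbl, mpath);
	 *	spin_unlock_bh(&tbl->walk_lock);
	 *
	 * Insert:
	 *	spin_lock_bh(&tbl->walk_lock) around the rhashtable insert
	 *	plus hlist_add_head[_rcu]().
	 */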
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+	hlist_del_rcu(&mpath->walk_list);
 	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
 	mesh_path_free_rcu(tbl, mpath);
 }
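Unlinking with hlist_del_rcu() rather than hlist_del() is what keeps the concurrent RCU walks above safe: the removed node's next pointer is left intact, so a reader already standing on the entry can still step past it. The memory must then survive a grace period; mesh_path_free_rcu() is expected to defer the actual reclaim, along these lines (sketch, assuming an rcu_head field named 'rcu' in struct mesh_path):

	static void example_path_free(struct mesh_table *tbl, struct mesh_path *mpath)
	{
		atomic_dec(&tbl->entries);
		kfree_rcu(mpath, rcu);	/* freed only after a grace period elapses */
	}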
@@ -571,79 +558,41 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
 
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 			       const u8 *proxy)
 {
 	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
 
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
+	struct hlist_node *n;
 
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
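All three flush variants share one shape, and all three need the _safe iterator because __mesh_path_del() unlinks the entry the cursor is standing on; the extra hlist_node pointer holds the pre-fetched next node. The common factor, written out as a hypothetical helper:

	static void mesh_table_flush_matching(struct mesh_table *tbl,
					      bool (*match)(struct mesh_path *mpath))
	{
		struct mesh_path *mpath;
		struct hlist_node *n;	/* next node, fetched before the body runs */

		spin_lock_bh(&tbl->walk_lock);
		hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
			if (match(mpath))
				__mesh_path_del(tbl, mpath);
		}
		spin_unlock_bh(&tbl->walk_lock);
	}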
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
 {
 	struct mesh_path *mpath;
 
-	rcu_read_lock();
+	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
 	if (!mpath) {
-		rcu_read_unlock();
+		spin_unlock_bh(&tbl->walk_lock);
 		return -ENXIO;
 	}
 
 	__mesh_path_del(tbl, mpath);
-	rcu_read_unlock();
+	spin_unlock_bh(&tbl->walk_lock);
 	return 0;
 }
 
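Note that the error path must now release walk_lock rather than the RCU read lock, since the function no longer takes rcu_read_lock(). Holding walk_lock across both the lookup and the delete also makes the pair atomic with respect to every other writer: the old RCU-only version left a window in which another CPU could remove the entry between rhashtable_lookup_fast() and __mesh_path_del(). The same property, condensed (hypothetical helper):

	static int mesh_table_remove(struct mesh_table *tbl, const u8 *addr)
	{
		struct mesh_path *mpath;
		int ret = 0;

		spin_lock_bh(&tbl->walk_lock);
		mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
		if (mpath)
			__mesh_path_del(tbl, mpath);	/* still present: atomic remove */
		else
			ret = -ENXIO;
		spin_unlock_bh(&tbl->walk_lock);

		return ret;
	}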
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 			  struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
 		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
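The expiry predicate combines two flag checks with a wraparound-safe jiffies comparison; time_after() handles counter overflow via signed arithmetic. The condition, isolated as a hypothetical helper:

	static bool mesh_path_is_stale(const struct mesh_path *mpath)
	{
		/* !(A) && !(B) on flag bits collapses to !(A | B) */
		return !(mpath->flags & (MESH_PATH_RESOLVING | MESH_PATH_FIXED)) &&
		       time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE);
	}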