@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
             (((unsigned int)(dev_bytenr >> 16)) ^
              ((unsigned int)((uintptr_t)bdev))) &
             (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
-       struct list_head *elem;
-
-       list_for_each(elem, h->table + hashval) {
-               struct btrfsic_block *const b =
-                   list_entry(elem, struct btrfsic_block,
-                              collision_resolving_node);
+       struct btrfsic_block *b;
 
+       list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
                if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
                        return b;
        }
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
              ((unsigned int)((uintptr_t)bdev_ref_to)) ^
              ((unsigned int)((uintptr_t)bdev_ref_from))) &
             (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
-       struct list_head *elem;
-
-       list_for_each(elem, h->table + hashval) {
-               struct btrfsic_block_link *const l =
-                   list_entry(elem, struct btrfsic_block_link,
-                              collision_resolving_node);
+       struct btrfsic_block_link *l;
 
+       list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
                BUG_ON(NULL == l->block_ref_to);
                BUG_ON(NULL == l->block_ref_from);
                if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
        const unsigned int hashval =
            (((unsigned int)((uintptr_t)bdev)) &
             (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
-       struct list_head *elem;
-
-       list_for_each(elem, h->table + hashval) {
-               struct btrfsic_dev_state *const ds =
-                   list_entry(elem, struct btrfsic_dev_state,
-                              collision_resolving_node);
+       struct btrfsic_dev_state *ds;
 
+       list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
                if (ds->bdev == bdev)
                        return ds;
        }
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
-       struct list_head *elem_all;
+       const struct btrfsic_block *b_all;
 
        BUG_ON(NULL == state);
 
        printk(KERN_INFO "all_blocks_list:\n");
-       list_for_each(elem_all, &state->all_blocks_list) {
-               const struct btrfsic_block *const b_all =
-                   list_entry(elem_all, struct btrfsic_block,
-                              all_blocks_node);
-               struct list_head *elem_ref_to;
-               struct list_head *elem_ref_from;
+       list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
+               const struct btrfsic_block_link *l;
 
                printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
                       btrfsic_get_block_type(state, b_all),
                       b_all->logical_bytenr, b_all->dev_state->name,
                       b_all->dev_bytenr, b_all->mirror_num);
 
-               list_for_each(elem_ref_to, &b_all->ref_to_list) {
-                       const struct btrfsic_block_link *const l =
-                           list_entry(elem_ref_to,
-                                      struct btrfsic_block_link,
-                                      node_ref_to);
-
+               list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
                        printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
                               " refers %u* to"
                               " %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
                               l->block_ref_to->mirror_num);
                }
 
-               list_for_each(elem_ref_from, &b_all->ref_from_list) {
-                       const struct btrfsic_block_link *const l =
-                           list_entry(elem_ref_from,
-                                      struct btrfsic_block_link,
-                                      node_ref_from);
-
+               list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
                        printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
                               " is ref %u* from"
                               " %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                                               &state->block_hashtable);
        if (NULL != block) {
                u64 bytenr = 0;
-               struct list_head *elem_ref_to;
-               struct list_head *tmp_ref_to;
+               struct btrfsic_block_link *l, *tmp;
 
                if (block->is_superblock) {
                        bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
                         * because it still carries valueable information
                         * like whether it was ever written and IO completed.
                         */
-                       list_for_each_safe(elem_ref_to, tmp_ref_to,
-                                          &block->ref_to_list) {
-                               struct btrfsic_block_link *const l =
-                                   list_entry(elem_ref_to,
-                                              struct btrfsic_block_link,
-                                              node_ref_to);
-
+                       list_for_each_entry_safe(l, tmp, &block->ref_to_list,
+                                                node_ref_to) {
                                if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                        btrfsic_print_rem_link(state, l);
                                l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
                                      struct btrfsic_block *const block,
                                      int recursion_level)
 {
-       struct list_head *elem_ref_to;
+       const struct btrfsic_block_link *l;
        int ret = 0;
 
        if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
         * This algorithm is recursive because the amount of used stack
         * space is very small and the max recursion depth is limited.
         */
-       list_for_each(elem_ref_to, &block->ref_to_list) {
-               const struct btrfsic_block_link *const l =
-                   list_entry(elem_ref_to, struct btrfsic_block_link,
-                              node_ref_to);
-
+       list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
                if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                        printk(KERN_INFO
                               "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
                const struct btrfsic_block *block,
                int recursion_level)
 {
-       struct list_head *elem_ref_from;
+       const struct btrfsic_block_link *l;
 
        if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
                /* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
         * This algorithm is recursive because the amount of used stack space
         * is very small and the max recursion depth is limited.
         */
-       list_for_each(elem_ref_from, &block->ref_from_list) {
-               const struct btrfsic_block_link *const l =
-                   list_entry(elem_ref_from, struct btrfsic_block_link,
-                              node_ref_from);
-
+       list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
                if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                        printk(KERN_INFO
                               "rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
                                  const struct btrfsic_block *block,
                                  int indent_level)
 {
-       struct list_head *elem_ref_to;
+       const struct btrfsic_block_link *l;
        int indent_add;
        static char buf[80];
        int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
        }
 
        cursor_position = indent_level;
-       list_for_each(elem_ref_to, &block->ref_to_list) {
-               const struct btrfsic_block_link *const l =
-                   list_entry(elem_ref_to, struct btrfsic_block_link,
-                              node_ref_to);
-
+       list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
                while (cursor_position < indent_level) {
                        printk(" ");
                        cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
 void btrfsic_unmount(struct btrfs_root *root,
                      struct btrfs_fs_devices *fs_devices)
 {
-       struct list_head *elem_all;
-       struct list_head *tmp_all;
+       struct btrfsic_block *b_all, *tmp_all;
        struct btrfsic_state *state;
        struct list_head *dev_head = &fs_devices->devices;
        struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
         * just free all memory that was allocated dynamically.
         * Free the blocks and the block_links.
         */
-       list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
-               struct btrfsic_block *const b_all =
-                   list_entry(elem_all, struct btrfsic_block,
-                              all_blocks_node);
-               struct list_head *elem_ref_to;
-               struct list_head *tmp_ref_to;
-
-               list_for_each_safe(elem_ref_to, tmp_ref_to,
-                                  &b_all->ref_to_list) {
-                       struct btrfsic_block_link *const l =
-                           list_entry(elem_ref_to,
-                                      struct btrfsic_block_link,
-                                      node_ref_to);
+       list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
+                                all_blocks_node) {
+               struct btrfsic_block_link *l, *tmp;
 
+               list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
+                                        node_ref_to) {
                        if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
                                btrfsic_print_rem_link(state, l);
 
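For readers unfamiliar with the helpers this patch switches to: list_for_each_entry() and list_for_each_entry_safe() from <linux/list.h> fold the open-coded list_for_each()/list_entry() pair into a single macro that iterates directly over the containing structures, and the _safe variant caches the next entry so the current one may be unlinked and freed inside the loop, which is what btrfsic_process_written_block() and btrfsic_unmount() rely on. The standalone sketch below uses a hypothetical struct item with a val field and a node list_head (none of these names come from check-integrity.c) purely to illustrate the before/after pattern in kernel context:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        int val;
        struct list_head node;          /* linked into some list_head elsewhere */
};

/* Old style: walk list_head pointers and fetch the container by hand. */
static int sum_old(struct list_head *head)
{
        struct list_head *elem;
        int sum = 0;

        list_for_each(elem, head) {
                struct item *const it = list_entry(elem, struct item, node);

                sum += it->val;
        }
        return sum;
}

/* New style: list_for_each_entry() hides the list_entry() step and yields struct item * directly. */
static int sum_new(struct list_head *head)
{
        struct item *it;
        int sum = 0;

        list_for_each_entry(it, head, node)
                sum += it->val;
        return sum;
}

/* Safe variant: entries may be removed and freed while iterating. */
static void free_all(struct list_head *head)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, head, node) {
                list_del(&it->node);
                kfree(it);
        }
}

The conversion is purely mechanical and does not change behavior; it removes the temporary struct list_head cursor variables and the manual list_entry() container lookups throughout check-integrity.c.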