@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
 {
 	delayed_node->root = root;
 	delayed_node->inode_id = inode_id;
-	atomic_set(&delayed_node->refs, 0);
+	refcount_set(&delayed_node->refs, 0);
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
@@ -81,22 +81,22 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
 	node = READ_ONCE(btrfs_inode->delayed_node);
 	if (node) {
-		atomic_inc(&node->refs);
+		refcount_inc(&node->refs);
 		return node;
 	}
 
 	spin_lock(&root->inode_lock);
 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 	if (node) {
 		if (btrfs_inode->delayed_node) {
-			atomic_inc(&node->refs);	/* can be accessed */
+			refcount_inc(&node->refs);	/* can be accessed */
 			BUG_ON(btrfs_inode->delayed_node != node);
 			spin_unlock(&root->inode_lock);
 			return node;
 		}
 		btrfs_inode->delayed_node = node;
 		/* can be accessed and cached in the inode */
-		atomic_add(2, &node->refs);
+		refcount_add(2, &node->refs);
 		spin_unlock(&root->inode_lock);
 		return node;
 	}
@@ -125,7 +125,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	btrfs_init_delayed_node(node, root, ino);
 
 	/* cached in the btrfs inode and can be accessed */
-	atomic_add(2, &node->refs);
+	refcount_set(&node->refs, 2);
 
 	ret = radix_tree_preload(GFP_NOFS);
 	if (ret) {
@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 	} else {
 		list_add_tail(&node->n_list, &root->node_list);
 		list_add_tail(&node->p_list, &root->prepare_list);
-		atomic_inc(&node->refs);	/* inserted into list */
+		refcount_inc(&node->refs);	/* inserted into list */
 		root->nodes++;
 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 	spin_lock(&root->lock);
 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		root->nodes--;
-		atomic_dec(&node->refs);	/* not in the list */
+		refcount_dec(&node->refs);	/* not in the list */
 		list_del_init(&node->n_list);
 		if (!list_empty(&node->p_list))
 			list_del_init(&node->p_list);
@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(
 
 	p = delayed_root->node_list.next;
 	node = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
 		p = node->n_list.next;
 
 	next = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&next->refs);
+	refcount_inc(&next->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
 	btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 	mutex_unlock(&delayed_node->mutex);
 
-	if (atomic_dec_and_test(&delayed_node->refs)) {
+	if (refcount_dec_and_test(&delayed_node->refs)) {
 		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
 		spin_lock(&root->inode_lock);
-		if (atomic_read(&delayed_node->refs) == 0) {
+		if (refcount_read(&delayed_node->refs) == 0) {
 			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
 			free = true;
@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 	p = delayed_root->prepare_list.next;
 	list_del_init(p);
 	node = list_entry(p, struct btrfs_delayed_node, p_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
 	 * insert/delete delayed items in this period. So we also needn't
 	 * requeue or dequeue this delayed node.
 	 */
-	atomic_dec(&delayed_node->refs);
+	refcount_dec(&delayed_node->refs);
 
 	return true;
 }
@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
 
 		for (i = 0; i < n; i++)
-			atomic_inc(&delayed_nodes[i]->refs);
+			refcount_inc(&delayed_nodes[i]->refs);
 		spin_unlock(&root->inode_lock);
 
 		for (i = 0; i < n; i++) {
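
Every hunk above is the same conversion: the delayed node's reference count moves from atomic_t to refcount_t, which saturates on overflow and warns on increments from zero. The one call that is not a one-to-one rename is in btrfs_get_or_create_delayed_node(), where atomic_add(2, ...) becomes refcount_set(&node->refs, 2): the node was just initialized with a count of zero, and refcount_add()/refcount_inc() on a zero refcount would trigger the use-after-free warning, so the initial value is set directly. The following is a minimal userspace sketch of that lifecycle, not kernel code; atomic_uint stands in for refcount_t, and node_create/node_get/node_put are illustrative names that do not appear in the patch.

/* Hedged sketch: models the refcounting pattern used by the patch above. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	atomic_uint refs;	/* stand-in for refcount_t */
};

static struct node *node_create(void)
{
	struct node *n = calloc(1, sizeof(*n));

	/* like refcount_set(&node->refs, 2): one reference for the cache,
	 * one for the caller, set in a single step on a fresh object */
	atomic_store(&n->refs, 2);
	return n;
}

static void node_get(struct node *n)
{
	/* like refcount_inc(): only legal while a reference is already held */
	unsigned int old = atomic_fetch_add(&n->refs, 1);

	assert(old > 0);
}

static void node_put(struct node *n)
{
	/* like refcount_dec_and_test(): the last reference frees the node */
	if (atomic_fetch_sub(&n->refs, 1) == 1)
		free(n);
}

int main(void)
{
	struct node *n = node_create();	/* refs == 2 */

	node_get(n);	/* queued on a list, refs == 3 */
	node_put(n);	/* dequeued, refs == 2 */
	node_put(n);	/* dropped from the cache, refs == 1 */
	node_put(n);	/* caller's reference dropped, node is freed */
	printf("node lifecycle complete\n");
	return 0;
}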