
Commit 6de5f18

ereshetova authored and kdave committed
btrfs: convert btrfs_delayed_node.refs from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows us to avoid accidental reference counter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
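For context, the conversion follows a fixed pattern. Below is a minimal sketch on a hypothetical struct foo with foo_* helpers (none of these names are part of this patch), showing how the atomic_t calls map onto the refcount_t API from <linux/refcount.h>. Unlike atomic_t, refcount_t saturates instead of wrapping and WARNs on increment-from-zero, so over- and underflows cannot silently become use-after-free.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Hypothetical refcounted object, for illustration only. */
	struct foo {
		refcount_t refs;	/* was: atomic_t refs; */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			refcount_set(&f->refs, 1);	/* was: atomic_set() */
		return f;
	}

	static void foo_get(struct foo *f)
	{
		refcount_inc(&f->refs);		/* WARNs if refs was already 0 */
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->refs))	/* was: atomic_dec_and_test() */
			kfree(f);
	}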
1 parent: 6df8cdf

2 files changed, 16 insertions(+), 16 deletions(-)

fs/btrfs/delayed-inode.c

Lines changed: 14 additions & 14 deletions
@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
 {
 	delayed_node->root = root;
 	delayed_node->inode_id = inode_id;
-	atomic_set(&delayed_node->refs, 0);
+	refcount_set(&delayed_node->refs, 0);
 	delayed_node->ins_root = RB_ROOT;
 	delayed_node->del_root = RB_ROOT;
 	mutex_init(&delayed_node->mutex);
@@ -81,22 +81,22 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
 	node = READ_ONCE(btrfs_inode->delayed_node);
 	if (node) {
-		atomic_inc(&node->refs);
+		refcount_inc(&node->refs);
 		return node;
 	}
 
 	spin_lock(&root->inode_lock);
 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 	if (node) {
 		if (btrfs_inode->delayed_node) {
-			atomic_inc(&node->refs);	/* can be accessed */
+			refcount_inc(&node->refs);	/* can be accessed */
 			BUG_ON(btrfs_inode->delayed_node != node);
 			spin_unlock(&root->inode_lock);
 			return node;
 		}
 		btrfs_inode->delayed_node = node;
 		/* can be accessed and cached in the inode */
-		atomic_add(2, &node->refs);
+		refcount_add(2, &node->refs);
 		spin_unlock(&root->inode_lock);
 		return node;
 	}
@@ -125,7 +125,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	btrfs_init_delayed_node(node, root, ino);
 
 	/* cached in the btrfs inode and can be accessed */
-	atomic_add(2, &node->refs);
+	refcount_set(&node->refs, 2);
 
 	ret = radix_tree_preload(GFP_NOFS);
 	if (ret) {
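This hunk is the one place where the conversion is not a mechanical rename: the node was just initialized with refs == 0 (see the first hunk), and refcount_add()/refcount_inc() deliberately WARN when starting from zero, since a zero count marks an object as effectively freed. The first references are therefore installed with refcount_set(). A minimal illustration of the distinction:

	refcount_set(&node->refs, 0);	/* init: object not yet published */
	refcount_set(&node->refs, 2);	/* fine: first refs set explicitly */
	refcount_add(2, &node->refs);	/* would WARN if refs were still 0 */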
@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 	} else {
 		list_add_tail(&node->n_list, &root->node_list);
 		list_add_tail(&node->p_list, &root->prepare_list);
-		atomic_inc(&node->refs);	/* inserted into list */
+		refcount_inc(&node->refs);	/* inserted into list */
 		root->nodes++;
 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 	}
@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 	spin_lock(&root->lock);
 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 		root->nodes--;
-		atomic_dec(&node->refs);	/* not in the list */
+		refcount_dec(&node->refs);	/* not in the list */
 		list_del_init(&node->n_list);
 		if (!list_empty(&node->p_list))
 			list_del_init(&node->p_list);
@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(
 
 	p = delayed_root->node_list.next;
 	node = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
 	p = node->n_list.next;
 
 	next = list_entry(p, struct btrfs_delayed_node, n_list);
-	atomic_inc(&next->refs);
+	refcount_inc(&next->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 	mutex_unlock(&delayed_node->mutex);
 
-	if (atomic_dec_and_test(&delayed_node->refs)) {
+	if (refcount_dec_and_test(&delayed_node->refs)) {
 		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
 		spin_lock(&root->inode_lock);
-		if (atomic_read(&delayed_node->refs) == 0) {
+		if (refcount_read(&delayed_node->refs) == 0) {
 			radix_tree_delete(&root->delayed_nodes_tree,
 					  delayed_node->inode_id);
 			free = true;
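The release hunk carries the other notable pattern: refcount_dec_and_test() only establishes that this caller dropped the last reference it held, and because btrfs_get_delayed_node() takes new references under root->inode_lock, the count is re-read under that same lock before the node is unpublished from the radix tree and freed. refcount_read() is the drop-in replacement for atomic_read() in that re-check. The shape of the pattern, sketched with hypothetical names (obj, lookup_lock, unpublish()):

	bool free = false;

	if (refcount_dec_and_test(&obj->refs)) {
		spin_lock(&lookup_lock);
		if (refcount_read(&obj->refs) == 0) {	/* no lookup raced in */
			unpublish(obj);			/* e.g. radix_tree_delete() */
			free = true;
		}
		spin_unlock(&lookup_lock);
		if (free)
			kfree(obj);
	}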
@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 	p = delayed_root->prepare_list.next;
 	list_del_init(p);
 	node = list_entry(p, struct btrfs_delayed_node, p_list);
-	atomic_inc(&node->refs);
+	refcount_inc(&node->refs);
 out:
 	spin_unlock(&delayed_root->lock);
 
@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
 	 * insert/delete delayed items in this period. So we also needn't
 	 * requeue or dequeue this delayed node.
 	 */
-	atomic_dec(&delayed_node->refs);
+	refcount_dec(&delayed_node->refs);
 
 	return true;
 }
@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
 
 		for (i = 0; i < n; i++)
-			atomic_inc(&delayed_nodes[i]->refs);
+			refcount_inc(&delayed_nodes[i]->refs);
 		spin_unlock(&root->inode_lock);
 
 		for (i = 0; i < n; i++) {

fs/btrfs/delayed-inode.h

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
-
+#include <linux/refcount.h>
 #include "ctree.h"
 
 /* types of the delayed item */
@@ -67,7 +67,7 @@ struct btrfs_delayed_node {
 	struct rb_root del_root;
 	struct mutex mutex;
 	struct btrfs_inode_item inode_item;
-	atomic_t refs;
+	refcount_t refs;
 	u64 index_cnt;
 	unsigned long flags;
 	int count;
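For reference, refcount_t is layout-compatible with atomic_t (it wraps one), so the field change above does not alter the size of struct btrfs_delayed_node; it only forces all accesses through the checked refcount_* helpers. The definition from <linux/refcount.h>:

	typedef struct refcount_struct {
		atomic_t refs;
	} refcount_t;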
