Skip to content

Commit f3f1c05

Browse files
iamkafaidavem330
authored and committed
bpf: Introduce bpf_map ID
This patch generates an unique ID for each created bpf_map. The approach is similar to the earlier patch for bpf_prog ID. It is worth to note that the bpf_map's ID and bpf_prog's ID are in two independent ID spaces and both have the same valid range: [1, INT_MAX). Signed-off-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Alexei Starovoitov <ast@fb.com> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent dc4bb0e commit f3f1c05

File tree

2 files changed

+34
-1
lines changed

2 files changed

+34
-1
lines changed

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ struct bpf_map {
4646
u32 max_entries;
4747
u32 map_flags;
4848
u32 pages;
49+
u32 id;
4950
struct user_struct *user;
5051
const struct bpf_map_ops *ops;
5152
struct work_struct work;

kernel/bpf/syscall.c

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@
2727
DEFINE_PER_CPU(int, bpf_prog_active);
2828
static DEFINE_IDR(prog_idr);
2929
static DEFINE_SPINLOCK(prog_idr_lock);
30+
static DEFINE_IDR(map_idr);
31+
static DEFINE_SPINLOCK(map_idr_lock);
3032

3133
int sysctl_unprivileged_bpf_disabled __read_mostly;
3234

@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
117119
free_uid(user);
118120
}
119121

122+
static int bpf_map_alloc_id(struct bpf_map *map)
123+
{
124+
int id;
125+
126+
spin_lock_bh(&map_idr_lock);
127+
id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
128+
if (id > 0)
129+
map->id = id;
130+
spin_unlock_bh(&map_idr_lock);
131+
132+
if (WARN_ON_ONCE(!id))
133+
return -ENOSPC;
134+
135+
return id > 0 ? 0 : id;
136+
}
137+
138+
static void bpf_map_free_id(struct bpf_map *map)
139+
{
140+
spin_lock_bh(&map_idr_lock);
141+
idr_remove(&map_idr, map->id);
142+
spin_unlock_bh(&map_idr_lock);
143+
}
144+
120145
/* called from workqueue */
121146
static void bpf_map_free_deferred(struct work_struct *work)
122147
{
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
141166
void bpf_map_put(struct bpf_map *map)
142167
{
143168
if (atomic_dec_and_test(&map->refcnt)) {
169+
bpf_map_free_id(map);
144170
INIT_WORK(&map->work, bpf_map_free_deferred);
145171
schedule_work(&map->work);
146172
}
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
239265
if (err)
240266
goto free_map_nouncharge;
241267

268+
err = bpf_map_alloc_id(map);
269+
if (err)
270+
goto free_map;
271+
242272
err = bpf_map_new_fd(map);
243273
if (err < 0)
244274
/* failed to allocate fd */
245-
goto free_map;
275+
goto free_id;
246276

247277
trace_bpf_map_create(map, err);
248278
return err;
249279

280+
free_id:
281+
bpf_map_free_id(map);
250282
free_map:
251283
bpf_map_uncharge_memlock(map);
252284
free_map_nouncharge:

0 commit comments

Comments
 (0)