Skip to content

Commit fd62d9f

Browse files
yotamgi authored and davem330 committed
net/sched: matchall: Fix configuration race
In the current version, the matchall internal state is split into two structs: cls_matchall_head and cls_matchall_filter. This makes little sense, as matchall instance supports only one filter, and there is no situation where one exists and the other does not. In addition, that led to some races when filter was deleted while packet was processed. Unify that two structs into one, thus simplifying the process of matchall creation and deletion. As a result, the new, delete and get callbacks have a dummy implementation where all the work is done in destroy and change callbacks, as was done in cls_cgroup. Fixes: bf3994d ("net/sched: introduce Match-all classifier") Reported-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Yotam Gigi <yotamg@mellanox.com> Acked-by: Jiri Pirko <jiri@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 4993b39 commit fd62d9f

File tree

1 file changed

+45
-82
lines changed

1 file changed

+45
-82
lines changed

net/sched/cls_matchall.c

Lines changed: 45 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -16,55 +16,41 @@
1616
#include <net/sch_generic.h>
1717
#include <net/pkt_cls.h>
1818

19-
struct cls_mall_filter {
19+
struct cls_mall_head {
2020
struct tcf_exts exts;
2121
struct tcf_result res;
2222
u32 handle;
23-
struct rcu_head rcu;
2423
u32 flags;
25-
};
26-
27-
struct cls_mall_head {
28-
struct cls_mall_filter *filter;
2924
struct rcu_head rcu;
3025
};
3126

3227
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
3328
struct tcf_result *res)
3429
{
3530
struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36-
struct cls_mall_filter *f = head->filter;
3731

38-
if (tc_skip_sw(f->flags))
32+
if (tc_skip_sw(head->flags))
3933
return -1;
4034

41-
return tcf_exts_exec(skb, &f->exts, res);
35+
return tcf_exts_exec(skb, &head->exts, res);
4236
}
4337

4438
static int mall_init(struct tcf_proto *tp)
4539
{
46-
struct cls_mall_head *head;
47-
48-
head = kzalloc(sizeof(*head), GFP_KERNEL);
49-
if (!head)
50-
return -ENOBUFS;
51-
52-
rcu_assign_pointer(tp->root, head);
53-
5440
return 0;
5541
}
5642

57-
static void mall_destroy_filter(struct rcu_head *head)
43+
static void mall_destroy_rcu(struct rcu_head *rcu)
5844
{
59-
struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
45+
struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
46+
rcu);
6047

61-
tcf_exts_destroy(&f->exts);
62-
63-
kfree(f);
48+
tcf_exts_destroy(&head->exts);
49+
kfree(head);
6450
}
6551

6652
static int mall_replace_hw_filter(struct tcf_proto *tp,
67-
struct cls_mall_filter *f,
53+
struct cls_mall_head *head,
6854
unsigned long cookie)
6955
{
7056
struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,15 +60,15 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
7460
offload.type = TC_SETUP_MATCHALL;
7561
offload.cls_mall = &mall_offload;
7662
offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77-
offload.cls_mall->exts = &f->exts;
63+
offload.cls_mall->exts = &head->exts;
7864
offload.cls_mall->cookie = cookie;
7965

8066
return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
8167
&offload);
8268
}
8369

8470
static void mall_destroy_hw_filter(struct tcf_proto *tp,
85-
struct cls_mall_filter *f,
71+
struct cls_mall_head *head,
8672
unsigned long cookie)
8773
{
8874
struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
10389
{
10490
struct cls_mall_head *head = rtnl_dereference(tp->root);
10591
struct net_device *dev = tp->q->dev_queue->dev;
106-
struct cls_mall_filter *f = head->filter;
10792

108-
if (!force && f)
109-
return false;
93+
if (!head)
94+
return true;
11095

111-
if (f) {
112-
if (tc_should_offload(dev, tp, f->flags))
113-
mall_destroy_hw_filter(tp, f, (unsigned long) f);
96+
if (tc_should_offload(dev, tp, head->flags))
97+
mall_destroy_hw_filter(tp, head, (unsigned long) head);
11498

115-
call_rcu(&f->rcu, mall_destroy_filter);
116-
}
117-
kfree_rcu(head, rcu);
99+
call_rcu(&head->rcu, mall_destroy_rcu);
118100
return true;
119101
}
120102

121103
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122104
{
123-
struct cls_mall_head *head = rtnl_dereference(tp->root);
124-
struct cls_mall_filter *f = head->filter;
125-
126-
if (f && f->handle == handle)
127-
return (unsigned long) f;
128-
return 0;
105+
return 0UL;
129106
}
130107

131108
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
134111
};
135112

136113
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
137-
struct cls_mall_filter *f,
114+
struct cls_mall_head *head,
138115
unsigned long base, struct nlattr **tb,
139116
struct nlattr *est, bool ovr)
140117
{
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
147124
return err;
148125

149126
if (tb[TCA_MATCHALL_CLASSID]) {
150-
f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
151-
tcf_bind_filter(tp, &f->res, base);
127+
head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
128+
tcf_bind_filter(tp, &head->res, base);
152129
}
153130

154-
tcf_exts_change(tp, &f->exts, &e);
131+
tcf_exts_change(tp, &head->exts, &e);
155132

156133
return 0;
157134
}
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
162139
unsigned long *arg, bool ovr)
163140
{
164141
struct cls_mall_head *head = rtnl_dereference(tp->root);
165-
struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166142
struct net_device *dev = tp->q->dev_queue->dev;
167-
struct cls_mall_filter *f;
168143
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
144+
struct cls_mall_head *new;
169145
u32 flags = 0;
170146
int err;
171147

172148
if (!tca[TCA_OPTIONS])
173149
return -EINVAL;
174150

175-
if (head->filter)
176-
return -EBUSY;
177-
178-
if (fold)
179-
return -EINVAL;
151+
if (head)
152+
return -EEXIST;
180153

181154
err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182155
tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
189162
return -EINVAL;
190163
}
191164

192-
f = kzalloc(sizeof(*f), GFP_KERNEL);
193-
if (!f)
165+
new = kzalloc(sizeof(*new), GFP_KERNEL);
166+
if (!new)
194167
return -ENOBUFS;
195168

196-
tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
169+
tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
197170

198171
if (!handle)
199172
handle = 1;
200-
f->handle = handle;
201-
f->flags = flags;
173+
new->handle = handle;
174+
new->flags = flags;
202175

203-
err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
176+
err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
204177
if (err)
205178
goto errout;
206179

207180
if (tc_should_offload(dev, tp, flags)) {
208-
err = mall_replace_hw_filter(tp, f, (unsigned long) f);
181+
err = mall_replace_hw_filter(tp, new, (unsigned long) new);
209182
if (err) {
210183
if (tc_skip_sw(flags))
211184
goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
214187
}
215188
}
216189

217-
*arg = (unsigned long) f;
218-
rcu_assign_pointer(head->filter, f);
219-
190+
*arg = (unsigned long) head;
191+
rcu_assign_pointer(tp->root, new);
192+
if (head)
193+
call_rcu(&head->rcu, mall_destroy_rcu);
220194
return 0;
221195

222196
errout:
223-
kfree(f);
197+
kfree(new);
224198
return err;
225199
}
226200

227201
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
228202
{
229-
struct cls_mall_head *head = rtnl_dereference(tp->root);
230-
struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
231-
struct net_device *dev = tp->q->dev_queue->dev;
232-
233-
if (tc_should_offload(dev, tp, f->flags))
234-
mall_destroy_hw_filter(tp, f, (unsigned long) f);
235-
236-
RCU_INIT_POINTER(head->filter, NULL);
237-
tcf_unbind_filter(tp, &f->res);
238-
call_rcu(&f->rcu, mall_destroy_filter);
239-
return 0;
203+
return -EOPNOTSUPP;
240204
}
241205

242206
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
243207
{
244208
struct cls_mall_head *head = rtnl_dereference(tp->root);
245-
struct cls_mall_filter *f = head->filter;
246209

247210
if (arg->count < arg->skip)
248211
goto skip;
249-
if (arg->fn(tp, (unsigned long) f, arg) < 0)
212+
if (arg->fn(tp, (unsigned long) head, arg) < 0)
250213
arg->stop = 1;
251214
skip:
252215
arg->count++;
@@ -255,28 +218,28 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
255218
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256219
struct sk_buff *skb, struct tcmsg *t)
257220
{
258-
struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
221+
struct cls_mall_head *head = (struct cls_mall_head *) fh;
259222
struct nlattr *nest;
260223

261-
if (!f)
224+
if (!head)
262225
return skb->len;
263226

264-
t->tcm_handle = f->handle;
227+
t->tcm_handle = head->handle;
265228

266229
nest = nla_nest_start(skb, TCA_OPTIONS);
267230
if (!nest)
268231
goto nla_put_failure;
269232

270-
if (f->res.classid &&
271-
nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
233+
if (head->res.classid &&
234+
nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
272235
goto nla_put_failure;
273236

274-
if (tcf_exts_dump(skb, &f->exts))
237+
if (tcf_exts_dump(skb, &head->exts))
275238
goto nla_put_failure;
276239

277240
nla_nest_end(skb, nest);
278241

279-
if (tcf_exts_dump_stats(skb, &f->exts) < 0)
242+
if (tcf_exts_dump_stats(skb, &head->exts) < 0)
280243
goto nla_put_failure;
281244

282245
return skb->len;

0 commit comments

Comments
 (0)