Skip to content

Commit 459d5f6

Browse files
jrfastab authored and
davem330 committed
net: sched: make cls_u32 per cpu
This uses per cpu counters in cls_u32 in preparation to convert over to rcu.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 331b729 commit 459d5f6

File tree

1 file changed

+59
-16
lines changed

1 file changed

+59
-16
lines changed

net/sched/cls_u32.c

Lines changed: 59 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -55,10 +55,12 @@ struct tc_u_knode {
5555
struct tcf_result res;
5656
struct tc_u_hnode *ht_down;
5757
#ifdef CONFIG_CLS_U32_PERF
58-
struct tc_u32_pcnt *pf;
58+
struct tc_u32_pcnt __percpu *pf;
5959
#endif
6060
#ifdef CONFIG_CLS_U32_MARK
61-
struct tc_u32_mark mark;
61+
u32 val;
62+
u32 mask;
63+
u32 __percpu *pcpu_success;
6264
#endif
6365
struct tc_u32_sel sel;
6466
};
@@ -115,16 +117,16 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
115117
struct tc_u32_key *key = n->sel.keys;
116118

117119
#ifdef CONFIG_CLS_U32_PERF
118-
n->pf->rcnt += 1;
120+
__this_cpu_inc(n->pf->rcnt);
119121
j = 0;
120122
#endif
121123

122124
#ifdef CONFIG_CLS_U32_MARK
123-
if ((skb->mark & n->mark.mask) != n->mark.val) {
125+
if ((skb->mark & n->mask) != n->val) {
124126
n = n->next;
125127
goto next_knode;
126128
} else {
127-
n->mark.success++;
129+
__this_cpu_inc(*n->pcpu_success);
128130
}
129131
#endif
130132

@@ -143,7 +145,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
143145
goto next_knode;
144146
}
145147
#ifdef CONFIG_CLS_U32_PERF
146-
n->pf->kcnts[j] += 1;
148+
__this_cpu_inc(n->pf->kcnts[j]);
147149
j++;
148150
#endif
149151
}
@@ -159,7 +161,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
159161
}
160162
#endif
161163
#ifdef CONFIG_CLS_U32_PERF
162-
n->pf->rhit += 1;
164+
__this_cpu_inc(n->pf->rhit);
163165
#endif
164166
r = tcf_exts_exec(skb, &n->exts, res);
165167
if (r < 0) {
@@ -342,7 +344,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
342344
if (n->ht_down)
343345
n->ht_down->refcnt--;
344346
#ifdef CONFIG_CLS_U32_PERF
345-
kfree(n->pf);
347+
free_percpu(n->pf);
346348
#endif
347349
kfree(n);
348350
return 0;
@@ -564,6 +566,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
564566
struct nlattr *tb[TCA_U32_MAX + 1];
565567
u32 htid;
566568
int err;
569+
#ifdef CONFIG_CLS_U32_PERF
570+
size_t size;
571+
#endif
567572

568573
if (opt == NULL)
569574
return handle ? -EINVAL : 0;
@@ -642,8 +647,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
642647
return -ENOBUFS;
643648

644649
#ifdef CONFIG_CLS_U32_PERF
645-
n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
646-
if (n->pf == NULL) {
650+
size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
651+
n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
652+
if (!n->pf) {
647653
kfree(n);
648654
return -ENOBUFS;
649655
}
@@ -656,12 +662,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
656662
tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
657663

658664
#ifdef CONFIG_CLS_U32_MARK
665+
n->pcpu_success = alloc_percpu(u32);
666+
659667
if (tb[TCA_U32_MARK]) {
660668
struct tc_u32_mark *mark;
661669

662670
mark = nla_data(tb[TCA_U32_MARK]);
663-
memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
664-
n->mark.success = 0;
671+
n->val = mark->val;
672+
n->mask = mark->mask;
665673
}
666674
#endif
667675

@@ -745,6 +753,11 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
745753
if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
746754
goto nla_put_failure;
747755
} else {
756+
#ifdef CONFIG_CLS_U32_PERF
757+
struct tc_u32_pcnt *gpf;
758+
#endif
759+
int cpu;
760+
748761
if (nla_put(skb, TCA_U32_SEL,
749762
sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
750763
&n->sel))
@@ -762,9 +775,20 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
762775
goto nla_put_failure;
763776

764777
#ifdef CONFIG_CLS_U32_MARK
765-
if ((n->mark.val || n->mark.mask) &&
766-
nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
767-
goto nla_put_failure;
778+
if ((n->val || n->mask)) {
779+
struct tc_u32_mark mark = {.val = n->val,
780+
.mask = n->mask,
781+
.success = 0};
782+
783+
for_each_possible_cpu(cpu) {
784+
__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpu);
785+
786+
mark.success += cnt;
787+
}
788+
789+
if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
790+
goto nla_put_failure;
791+
}
768792
#endif
769793

770794
if (tcf_exts_dump(skb, &n->exts) < 0)
@@ -779,10 +803,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
779803
}
780804
#endif
781805
#ifdef CONFIG_CLS_U32_PERF
806+
gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
807+
n->sel.nkeys * sizeof(u64),
808+
GFP_KERNEL);
809+
if (!gpf)
810+
goto nla_put_failure;
811+
812+
for_each_possible_cpu(cpu) {
813+
int i;
814+
struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
815+
816+
gpf->rcnt += pf->rcnt;
817+
gpf->rhit += pf->rhit;
818+
for (i = 0; i < n->sel.nkeys; i++)
819+
gpf->kcnts[i] += pf->kcnts[i];
820+
}
821+
782822
if (nla_put(skb, TCA_U32_PCNT,
783823
sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
784-
n->pf))
824+
gpf)) {
825+
kfree(gpf);
785826
goto nla_put_failure;
827+
}
828+
kfree(gpf);
786829
#endif
787830
}
788831

0 commit comments

Comments
 (0)