
Commit e37268e

Merge branch 'add-flow_rule-infrastructure'
Pablo Neira Ayuso says:

====================
add flow_rule infrastructure

This patchset, as is, allows us to reuse the driver codebase to configure
ACL hardware offloads for the ethtool_rxnfc and the TC flower interfaces.
A few clients for this infrastructure are presented, such as the bcm_sf2
and the qede drivers, for reference. Moreover, all of the existing drivers
in the tree are converted to use this infrastructure.

This patchset re-uses the existing flow dissector infrastructure that was
introduced by Jiri Pirko et al., so the amount of abstraction it adds is
minimal: just a few wrapper structures for the selector side of the rules.
In order to express actions, this patchset exposes an action API that is
based on the existing TC action infrastructure and on what existing
drivers already support on that front.

v7: This patchset is a rebase on top of the net-next tree, after
addressing questions and feedback from driver developers in the last
batch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents d9b5a67 + 37c5d3e commit e37268e
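
For readers coming to the series cold, the conversion pattern the ethtool_rxnfc-based drivers follow is: build an ethtool_rx_flow_rule from the ethtool_rx_flow_spec, read the selectors back through the flow_rule_match_*() helpers, program the hardware, and destroy the intermediate rule. The sketch below is illustrative only; the function name example_parse_rxnfc and the elided hardware programming are placeholders, not code from this series (the real conversion is in the bcm_sf2 diff below).

/* Minimal sketch of the create/match/destroy pattern, assuming only the
 * helpers added by this series (ethtool_rx_flow_rule_create(),
 * flow_rule_match_*(), ethtool_rx_flow_rule_destroy()).
 */
#include <linux/err.h>
#include <linux/ethtool.h>
#include <net/flow_offload.h>

static int example_parse_rxnfc(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* Each accessor fills a key/mask pair backed by flow dissector keys */
	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);

	/* ... program ipv4.key/ipv4.mask and ports.key/ports.mask into the
	 * hardware here; this is the driver-specific part ...
	 */

	ethtool_rx_flow_rule_destroy(flow);
	return 0;
}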

File tree

22 files changed (+2416, -2009 lines)


drivers/net/dsa/bcm_sf2_cfp.c

Lines changed: 67 additions & 35 deletions
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <net/dsa.h>
 #include <linux/bitmap.h>
+#include <net/flow_offload.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -257,7 +258,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
 }
 
 static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
-                                   struct ethtool_tcpip4_spec *v4_spec,
+                                   struct flow_dissector_key_ipv4_addrs *addrs,
+                                   struct flow_dissector_key_ports *ports,
                                    unsigned int slice_num,
                                    bool mask)
 {
@@ -278,7 +280,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
          * UDF_n_A6    [23:8]
          * UDF_n_A5    [7:0]
          */
-        reg = be16_to_cpu(v4_spec->pdst) >> 8;
+        reg = be16_to_cpu(ports->dst) >> 8;
         if (mask)
                 offset = CORE_CFP_MASK_PORT(3);
         else
@@ -289,9 +291,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
          * UDF_n_A4    [23:8]
          * UDF_n_A3    [7:0]
          */
-        reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
-              (u32)be16_to_cpu(v4_spec->psrc) << 8 |
-              (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+        reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+              (u32)be16_to_cpu(ports->src) << 8 |
+              (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
         if (mask)
                 offset = CORE_CFP_MASK_PORT(2);
         else
@@ -302,9 +304,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
          * UDF_n_A2    [23:8]
          * UDF_n_A1    [7:0]
          */
-        reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
-              (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
-              (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+        reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+              (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+              (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
         if (mask)
                 offset = CORE_CFP_MASK_PORT(1);
         else
@@ -317,8 +319,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
          * Slice ID    [3:2]
          * Slice valid [1:0]
          */
-        reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
-              (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+        reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+              (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
               SLICE_NUM(slice_num) | SLICE_VALID;
         if (mask)
                 offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +334,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
                                      unsigned int queue_num,
                                      struct ethtool_rx_flow_spec *fs)
 {
-        struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+        struct ethtool_rx_flow_spec_input input = {};
         const struct cfp_udf_layout *layout;
         unsigned int slice_num, rule_index;
+        struct ethtool_rx_flow_rule *flow;
+        struct flow_match_ipv4_addrs ipv4;
+        struct flow_match_ports ports;
+        struct flow_match_ip ip;
         u8 ip_proto, ip_frag;
         u8 num_udf;
         u32 reg;
@@ -343,13 +349,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
         switch (fs->flow_type & ~FLOW_EXT) {
         case TCP_V4_FLOW:
                 ip_proto = IPPROTO_TCP;
-                v4_spec = &fs->h_u.tcp_ip4_spec;
-                v4_m_spec = &fs->m_u.tcp_ip4_spec;
                 break;
         case UDP_V4_FLOW:
                 ip_proto = IPPROTO_UDP;
-                v4_spec = &fs->h_u.udp_ip4_spec;
-                v4_m_spec = &fs->m_u.udp_ip4_spec;
                 break;
         default:
                 return -EINVAL;
@@ -367,11 +369,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
         if (rule_index > bcm_sf2_cfp_rule_size(priv))
                 return -ENOSPC;
 
+        input.fs = fs;
+        flow = ethtool_rx_flow_rule_create(&input);
+        if (IS_ERR(flow))
+                return PTR_ERR(flow);
+
+        flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+        flow_rule_match_ports(flow->rule, &ports);
+        flow_rule_match_ip(flow->rule, &ip);
+
         layout = &udf_tcpip4_layout;
         /* We only use one UDF slice for now */
         slice_num = bcm_sf2_get_slice_number(layout, 0);
-        if (slice_num == UDF_NUM_SLICES)
-                return -EINVAL;
+        if (slice_num == UDF_NUM_SLICES) {
+                ret = -EINVAL;
+                goto out_err_flow_rule;
+        }
 
         num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
 
@@ -398,7 +411,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
          * Reserved            [1]
          * UDF_Valid[8]        [0]
          */
-        core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
+        core_writel(priv, ip.key->tos << IPTOS_SHIFT |
                     ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
                     udf_upper_bits(num_udf),
                     CORE_CFP_DATA_PORT(6));
@@ -417,23 +430,23 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
         core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
         /* Program the match and the mask */
-        bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
-        bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+        bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
+        bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
 
         /* Insert into TCAM now */
         bcm_sf2_cfp_rule_addr_set(priv, rule_index);
 
         ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
         if (ret) {
                 pr_err("TCAM entry at addr %d failed\n", rule_index);
-                return ret;
+                goto out_err_flow_rule;
         }
 
         /* Insert into Action and policer RAMs now */
         ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
                                       queue_num, true);
         if (ret)
-                return ret;
+                goto out_err_flow_rule;
 
         /* Turn on CFP for this rule now */
         reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +459,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
         fs->location = rule_index;
 
         return 0;
+
+out_err_flow_rule:
+        ethtool_rx_flow_rule_destroy(flow);
+        return ret;
 }
 
 static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -582,8 +599,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
                                      struct ethtool_rx_flow_spec *fs)
 {
         struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+        struct ethtool_rx_flow_spec_input input = {};
         unsigned int slice_num, rule_index[2];
         const struct cfp_udf_layout *layout;
+        struct ethtool_rx_flow_rule *flow;
+        struct flow_match_ipv6_addrs ipv6;
+        struct flow_match_ports ports;
         u8 ip_proto, ip_frag;
         int ret = 0;
         u8 num_udf;
@@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
                 goto out_err;
         }
 
+        input.fs = fs;
+        flow = ethtool_rx_flow_rule_create(&input);
+        if (IS_ERR(flow)) {
+                ret = PTR_ERR(flow);
+                goto out_err;
+        }
+        flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+        flow_rule_match_ports(flow->rule, &ports);
+
         /* Apply the UDF layout for this filter */
         bcm_sf2_cfp_udf_set(priv, layout, slice_num);
 
@@ -688,31 +718,31 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
         core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
         /* Slice the IPv6 source address and port */
-        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
-                               slice_num, false);
-        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-                               SLICE_NUM_MASK, true);
+        bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+                               ports.key->src, slice_num, false);
+        bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+                               ports.mask->src, SLICE_NUM_MASK, true);
 
         /* Insert into TCAM now because we need to insert a second rule */
         bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
 
         ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
         if (ret) {
                 pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
-                goto out_err;
+                goto out_err_flow_rule;
         }
 
         /* Insert into Action and policer RAMs now */
         ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
                                       queue_num, false);
         if (ret)
-                goto out_err;
+                goto out_err_flow_rule;
 
         /* Now deal with the second slice to chain this rule */
         slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
         if (slice_num == UDF_NUM_SLICES) {
                 ret = -EINVAL;
-                goto out_err;
+                goto out_err_flow_rule;
         }
 
         num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,18 +778,18 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
         /* Mask all */
         core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
 
-        bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
-                               false);
-        bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
-                               SLICE_NUM_MASK, true);
+        bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+                               ports.key->dst, slice_num, false);
+        bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+                               ports.key->dst, SLICE_NUM_MASK, true);
 
         /* Insert into TCAM now */
         bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
 
         ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
         if (ret) {
                 pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
-                goto out_err;
+                goto out_err_flow_rule;
         }
 
         /* Insert into Action and policer RAMs now, set chain ID to
@@ -768,7 +798,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
         ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
                                       queue_num, true);
         if (ret)
-                goto out_err;
+                goto out_err_flow_rule;
 
         /* Turn on CFP for this rule now */
         reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 
         return ret;
 
+out_err_flow_rule:
+        ethtool_rx_flow_rule_destroy(flow);
 out_err:
         clear_bit(rule_index[1], priv->cfp.used);
         return ret;

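The cover letter also mentions the action side of the API, which the bcm_sf2 conversion above does not exercise (its CFP actions are programmed separately). The sketch below is a rough illustration of how a driver can walk the flow_action list carried by a flow_rule; example_parse_actions is a hypothetical name and the two action IDs shown are simply the ones most ethtool rules translate into.

/* Hedged sketch, not driver code from this series: iterate the actions
 * attached to a flow_rule using the flow_action_for_each() helper from
 * <net/flow_offload.h>.
 */
#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_parse_actions(const struct flow_rule *rule)
{
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			/* program a discard entry */
			break;
		case FLOW_ACTION_QUEUE:
			/* steer matching packets to act->queue.index */
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}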