@@ -20,53 +20,91 @@
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"

-struct cfp_udf_layout {
-	u8 slices[UDF_NUM_SLICES];
+struct cfp_udf_slice_layout {
+	u8 slices[UDFS_PER_SLICE];
 	u32 mask_value;
+	u32 base_offset;
+};

+struct cfp_udf_layout {
+	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
 };

+static const u8 zero_slice[UDFS_PER_SLICE] = { };
+
 /* UDF slices layout for a TCPv4/UDPv4 specification */
 static const struct cfp_udf_layout udf_tcpip4_layout = {
-	.slices = {
-		/* End of L2, byte offset 12, src IP[0:15] */
-		CFG_UDF_EOL2 | 6,
-		/* End of L2, byte offset 14, src IP[16:31] */
-		CFG_UDF_EOL2 | 7,
-		/* End of L2, byte offset 16, dst IP[0:15] */
-		CFG_UDF_EOL2 | 8,
-		/* End of L2, byte offset 18, dst IP[16:31] */
-		CFG_UDF_EOL2 | 9,
-		/* End of L3, byte offset 0, src port */
-		CFG_UDF_EOL3 | 0,
-		/* End of L3, byte offset 2, dst port */
-		CFG_UDF_EOL3 | 1,
-		0, 0, 0
+	.udfs = {
+		[1] = {
+			.slices = {
+				/* End of L2, byte offset 12, src IP[0:15] */
+				CFG_UDF_EOL2 | 6,
+				/* End of L2, byte offset 14, src IP[16:31] */
+				CFG_UDF_EOL2 | 7,
+				/* End of L2, byte offset 16, dst IP[0:15] */
+				CFG_UDF_EOL2 | 8,
+				/* End of L2, byte offset 18, dst IP[16:31] */
+				CFG_UDF_EOL2 | 9,
+				/* End of L3, byte offset 0, src port */
+				CFG_UDF_EOL3 | 0,
+				/* End of L3, byte offset 2, dst port */
+				CFG_UDF_EOL3 | 1,
+				0, 0, 0
+			},
+			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
+		},
 	},
-	.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
 };

 static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
 {
 	unsigned int i, count = 0;

-	for (i = 0; i < UDF_NUM_SLICES; i++) {
+	for (i = 0; i < UDFS_PER_SLICE; i++) {
 		if (layout[i] != 0)
 			count++;
 	}

 	return count;
 }

+static inline u32 udf_upper_bits(unsigned int num_udf)
+{
+	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
+}
+
+static inline u32 udf_lower_bits(unsigned int num_udf)
+{
+	return (u8)GENMASK(num_udf - 1, 0);
+}
+
+static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
+					     unsigned int start)
+{
+	const struct cfp_udf_slice_layout *slice_layout;
+	unsigned int slice_idx;
+
+	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
+		slice_layout = &l->udfs[slice_idx];
+		if (memcmp(slice_layout->slices, zero_slice,
+			   sizeof(zero_slice)))
+			break;
+	}
+
+	return slice_idx;
+}
+
 static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
-				unsigned int slice_num,
-				const u8 *layout)
+				const struct cfp_udf_layout *layout,
+				unsigned int slice_num)
 {
-	u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+	u32 offset = layout->udfs[slice_num].base_offset;
 	unsigned int i;

-	for (i = 0; i < UDF_NUM_SLICES; i++)
-		core_writel(priv, layout[i], offset + i * 4);
+	for (i = 0; i < UDFS_PER_SLICE; i++)
+		core_writel(priv, layout->udfs[slice_num].slices[i],
+			    offset + i * 4);
 }

 static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
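A quick worked example of the two new helpers (not part of the patch): the hardware splits the UDF valid mask across two register words, UDF_Valid[8] in CORE_CFP_DATA_PORT(6) and UDF_Valid[7:0] in CORE_CFP_DATA_PORT(5), which is exactly what udf_upper_bits() and udf_lower_bits() compute. The standalone sketch below assumes UDFS_PER_SLICE is 9 (as the nine-entry slices[] array and the shift by UDFS_PER_SLICE - 1 suggest) and substitutes a simplified 32-bit GENMASK() for the kernel macro.

/* Sketch: how udf_upper_bits()/udf_lower_bits() split the UDF valid mask.
 * Assumes UDFS_PER_SLICE == 9; GENMASK() here is a 32-bit stand-in for the
 * kernel macro.
 */
#include <stdio.h>

#define UDFS_PER_SLICE	9
#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

static unsigned int udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static unsigned int udf_lower_bits(unsigned int num_udf)
{
	return (unsigned char)GENMASK(num_udf - 1, 0);
}

int main(void)
{
	/* The TCPv4/UDPv4 layout uses 6 UDFs: upper = 0x0, lower = 0x3f */
	printf("6 UDFs: upper=%#x lower=%#x\n",
	       udf_upper_bits(6), udf_lower_bits(6));
	/* A slice using all 9 UDFs sets UDF_Valid[8]: upper = 0x1, lower = 0xff */
	printf("9 UDFs: upper=%#x lower=%#x\n",
	       udf_upper_bits(9), udf_lower_bits(9));
	return 0;
}

With the TCPv4/UDPv4 layout above (six UDFs in use), only the lower byte carries valid bits, so DATA_PORT(6) receives no extra bit and DATA_PORT(5) gets 0x3f in bits 31:24.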
@@ -189,13 +227,16 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	else
 		rule_index = fs->location;

-	/* We only use one UDF slice for now */
-	slice_num = 1;
 	layout = &udf_tcpip4_layout;
-	num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+	/* We only use one UDF slice for now */
+	slice_num = bcm_sf2_get_slice_number(layout, 0);
+	if (slice_num == UDF_NUM_SLICES)
+		return -EINVAL;
+
+	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

 	/* Apply the UDF layout for this filter */
-	bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

 	/* Apply to all packets received through this port */
 	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
@@ -218,14 +259,15 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	 * UDF_Valid[8]		[0]
 	 */
 	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
-		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT,
+		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
+		    udf_upper_bits(num_udf),
 		    CORE_CFP_DATA_PORT(6));

 	/* UDF_Valid[7:0]	[31:24]
 	 * S-Tag		[23:8]
 	 * C-Tag		[7:0]
 	 */
-	core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

 	/* C-Tag		[31:24]
 	 * UDF_n_A8		[23:8]
@@ -270,10 +312,11 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

 	/* Mask with the specific layout for IPv4 packets */
-	core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+	core_writel(priv, layout->udfs[slice_num].mask_value |
+		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

 	/* Mask all but valid UDFs */
-	core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

 	/* Mask all */
 	core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
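For reference, a condensed sketch of how the reworked helpers are meant to be used together, mirroring the bcm_sf2_cfp_ipv4_rule_set() changes above. The function name program_udf_layout() is made up for illustration, and the remaining CFP data/mask programming (TOS, protocol, fragment bit, addresses, ports) is omitted.

/* Illustrative only: shows the call order introduced by this patch.
 * The real bcm_sf2_cfp_ipv4_rule_set() also ORs TOS, IP protocol and the
 * fragment bit into DATA_PORT(6) and programs many more register words.
 */
static int program_udf_layout(struct bcm_sf2_priv *priv)
{
	const struct cfp_udf_layout *layout = &udf_tcpip4_layout;
	unsigned int slice_num, num_udf;

	/* Pick the first slice whose UDF set is not all zeroes */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	/* Count how many UDFs that slice actually uses */
	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Program the slice starting at its own base_offset */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* UDF_Valid[8] lives in DATA_PORT(6); UDF_Valid[7:0] in bits 31:24
	 * of DATA_PORT(5)
	 */
	core_writel(priv, udf_upper_bits(num_udf), CORE_CFP_DATA_PORT(6));
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	return 0;
}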