 #include "cxgb4.h"
 #include "cxgb4_tc_flower.h"
 
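+/* kzalloc() so the embedded ch_filter_specification starts out zeroed. */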
+static struct ch_tc_flower_entry *allocate_flower_entry(void)
+{
+        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+        return new;
+}
+
+/* Must be called with either RTNL or rcu_read_lock */
+static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
+                                                   unsigned long flower_cookie)
+{
+        struct ch_tc_flower_entry *flower_entry;
+
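+        /* The TC cookie doubles as the hash key; still compare the full
+         * cookie to disambiguate hash-bucket collisions.
+         */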
+        hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry,
+                                   link, flower_cookie)
+                if (flower_entry->tc_flower_cookie == flower_cookie)
+                        return flower_entry;
+        return NULL;
+}
+
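+/* Build a ch_filter_specification from the flower match. Note the
+ * direction convention: the packet's destination maps to the "local"
+ * fields (lip/lport) and its source to the "foreign" fields (fip/fport).
+ */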
+static void cxgb4_process_flow_match(struct net_device *dev,
+                                     struct tc_cls_flower_offload *cls,
+                                     struct ch_filter_specification *fs)
+{
+        u16 addr_type = 0;
+
+        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+                struct flow_dissector_key_control *key =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_CONTROL,
+                                                  cls->key);
+
+                addr_type = key->addr_type;
+        }
+
+        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+                struct flow_dissector_key_basic *key =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_BASIC,
+                                                  cls->key);
+                struct flow_dissector_key_basic *mask =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_BASIC,
+                                                  cls->mask);
+                u16 ethtype_key = ntohs(key->n_proto);
+                u16 ethtype_mask = ntohs(mask->n_proto);
+
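+                /* ETH_P_ALL matches any ethertype; the hardware expresses
+                 * that as a zero key and mask.
+                 */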
+                if (ethtype_key == ETH_P_ALL) {
+                        ethtype_key = 0;
+                        ethtype_mask = 0;
+                }
+
+                fs->val.ethtype = ethtype_key;
+                fs->mask.ethtype = ethtype_mask;
+                fs->val.proto = key->ip_proto;
+                fs->mask.proto = mask->ip_proto;
+        }
+
+        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+                struct flow_dissector_key_ipv4_addrs *key =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                                  cls->key);
+                struct flow_dissector_key_ipv4_addrs *mask =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                                  cls->mask);
+
+                fs->type = 0;
+                memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
+                memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
+                memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
+                memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+        }
+
+        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+                struct flow_dissector_key_ipv6_addrs *key =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                                  cls->key);
+                struct flow_dissector_key_ipv6_addrs *mask =
+                        skb_flow_dissector_target(cls->dissector,
+                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                                  cls->mask);
+
+                fs->type = 1;
+                memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
+                memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
+                memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
+                memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+        }
+
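+        /* key->dst/src are __be16; the filter spec wants host order */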
+        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+                struct flow_dissector_key_ports *key, *mask;
+
+                key = skb_flow_dissector_target(cls->dissector,
+                                                FLOW_DISSECTOR_KEY_PORTS,
+                                                cls->key);
+                mask = skb_flow_dissector_target(cls->dissector,
+                                                 FLOW_DISSECTOR_KEY_PORTS,
+                                                 cls->mask);
+                fs->val.lport = be16_to_cpu(key->dst);
+                fs->mask.lport = be16_to_cpu(mask->dst);
+                fs->val.fport = be16_to_cpu(key->src);
+                fs->mask.fport = be16_to_cpu(mask->src);
+        }
+
+        /* Match only packets coming from the ingress port where this
+         * filter will be created.
+         */
+        fs->val.iport = netdev2pinfo(dev)->port_id;
+        fs->mask.iport = ~0;
+}
+
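+/* Reject a match that uses any dissector key the hardware filters
+ * cannot express.
+ */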
+static int cxgb4_validate_flow_match(struct net_device *dev,
+                                     struct tc_cls_flower_offload *cls)
+{
+        if (cls->dissector->used_keys &
+            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+              BIT(FLOW_DISSECTOR_KEY_BASIC) |
+              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+                netdev_warn(dev, "Unsupported key used: 0x%x\n",
+                            cls->dissector->used_keys);
+                return -EOPNOTSUPP;
+        }
+        return 0;
+}
+
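+/* Translate TC actions into the filter action. Only drop and redirect
+ * are offloaded; cxgb4_validate_flow_actions() has already rejected
+ * everything else and verified that the redirect target is one of this
+ * adapter's own ports, so __dev_get_by_index() (which relies on RTNL
+ * being held) is expected to succeed here.
+ */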
+static void cxgb4_process_flow_actions(struct net_device *in,
+                                       struct tc_cls_flower_offload *cls,
+                                       struct ch_filter_specification *fs)
+{
+        const struct tc_action *a;
+        LIST_HEAD(actions);
+
+        tcf_exts_to_list(cls->exts, &actions);
+        list_for_each_entry(a, &actions, list) {
+                if (is_tcf_gact_shot(a)) {
+                        fs->action = FILTER_DROP;
+                } else if (is_tcf_mirred_egress_redirect(a)) {
+                        int ifindex = tcf_mirred_ifindex(a);
+                        struct net_device *out = __dev_get_by_index(dev_net(in),
+                                                                    ifindex);
+                        struct port_info *pi = netdev_priv(out);
+
+                        fs->action = FILTER_SWITCH;
+                        fs->eport = pi->port_id;
+                }
+        }
+}
+
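+/* Only two actions are offloadable: drop (gact shot) and redirect
+ * (mirred egress) to another port of the same adapter.
+ */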
+static int cxgb4_validate_flow_actions(struct net_device *dev,
+                                       struct tc_cls_flower_offload *cls)
+{
+        const struct tc_action *a;
+        LIST_HEAD(actions);
+
+        tcf_exts_to_list(cls->exts, &actions);
+        list_for_each_entry(a, &actions, list) {
+                if (is_tcf_gact_shot(a)) {
+                        /* Do nothing */
+                } else if (is_tcf_mirred_egress_redirect(a)) {
+                        struct adapter *adap = netdev2adap(dev);
+                        struct net_device *n_dev;
+                        unsigned int i, ifindex;
+                        bool found = false;
+
+                        ifindex = tcf_mirred_ifindex(a);
+                        for_each_port(adap, i) {
+                                n_dev = adap->port[i];
+                                if (ifindex == n_dev->ifindex) {
+                                        found = true;
+                                        break;
+                                }
+                        }
+
+                        /* If interface doesn't belong to our hw, then
+                         * the provided output port is not valid
+                         */
+                        if (!found) {
+                                netdev_err(dev, "%s: Out port invalid\n",
+                                           __func__);
+                                return -EINVAL;
+                        }
+                } else {
+                        netdev_err(dev, "%s: Unsupported action\n", __func__);
+                        return -EOPNOTSUPP;
+                }
+        }
+        return 0;
+}
+
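+/* Offload a flower rule: validate it, build a filter specification,
+ * program the filter and wait for the hardware reply, then index the
+ * new entry by its TC cookie so destroy/stats can find it later.
+ */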
 int cxgb4_tc_flower_replace(struct net_device *dev,
 			    struct tc_cls_flower_offload *cls)
 {
-	return -EOPNOTSUPP;
+        struct adapter *adap = netdev2adap(dev);
+        struct ch_tc_flower_entry *ch_flower;
+        struct ch_filter_specification *fs;
+        struct filter_ctx ctx;
+        int fidx;
+        int ret;
+
+        if (cxgb4_validate_flow_actions(dev, cls))
+                return -EOPNOTSUPP;
+
+        if (cxgb4_validate_flow_match(dev, cls))
+                return -EOPNOTSUPP;
+
+        ch_flower = allocate_flower_entry();
+        if (!ch_flower) {
+                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
+                return -ENOMEM;
+        }
+
+        fs = &ch_flower->fs;
+        fs->hitcnts = 1;
+        cxgb4_process_flow_actions(dev, cls, fs);
+        cxgb4_process_flow_match(dev, cls, fs);
+
+        fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
+        if (fidx < 0) {
+                netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+                ret = -ENOMEM;
+                goto free_entry;
+        }
+
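+        /* Filter programming is asynchronous: the hardware reply completes
+         * ctx and reports the outcome in ctx.result and the assigned
+         * filter tid in ctx.tid.
+         */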
+        init_completion(&ctx.completion);
+        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
+        if (ret) {
+                netdev_err(dev, "%s: filter creation err %d\n",
+                           __func__, ret);
+                goto free_entry;
+        }
+
+        /* Wait for reply */
+        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+        if (!ret) {
+                ret = -ETIMEDOUT;
+                goto free_entry;
+        }
+
+        ret = ctx.result;
+        /* Check if hw returned error for filter creation */
+        if (ret) {
+                netdev_err(dev, "%s: filter creation err %d\n",
+                           __func__, ret);
+                goto free_entry;
+        }
+
+        INIT_HLIST_NODE(&ch_flower->link);
+        ch_flower->tc_flower_cookie = cls->cookie;
+        ch_flower->filter_id = ctx.tid;
+        hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie);
+
+        return ret;
+
+free_entry:
+        kfree(ch_flower);
+        return ret;
 }
 
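+/* Tear down the hardware filter behind a flower rule. The entry is
+ * freed with kfree_rcu() so that a concurrent RCU-protected lookup
+ * never sees freed memory.
+ */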
 int cxgb4_tc_flower_destroy(struct net_device *dev,
 			    struct tc_cls_flower_offload *cls)
 {
-	return -EOPNOTSUPP;
+        struct adapter *adap = netdev2adap(dev);
+        struct ch_tc_flower_entry *ch_flower;
+        int ret;
+
+        ch_flower = ch_flower_lookup(adap, cls->cookie);
+        if (!ch_flower)
+                return -ENOENT;
+
+        ret = cxgb4_del_filter(dev, ch_flower->filter_id);
+        if (ret)
+                goto err;
+
+        hash_del_rcu(&ch_flower->link);
+        kfree_rcu(ch_flower, rcu);
+
+err:
+        return ret;
 }
 
 int cxgb4_tc_flower_stats(struct net_device *dev,
 			  struct tc_cls_flower_offload *cls)
 {
 	return -EOPNOTSUPP;
 }
+
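+/* Called once at adapter init to set up the cookie -> filter hash table. */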
+void cxgb4_init_tc_flower(struct adapter *adap)
+{
+        hash_init(adap->flower_anymatch_tbl);
+}
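Once these hooks are wired up to the driver's ndo_setup_tc path, a rule
can be offloaded from userspace with tc. A minimal sketch (the interface
name is hypothetical):

    tc qdisc add dev enp2s0f4 ingress
    tc filter add dev enp2s0f4 ingress protocol ip flower \
        ip_proto tcp dst_port 80 skip_sw action drop

skip_sw asks TC to rely on the hardware filter alone; with hitcnts set
in the filter spec, per-rule packet/byte counters can later back the
cxgb4_tc_flower_stats() callback.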