@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
 	memset(&dst_mac[ETH_ALEN], 0, 2);
 }
 
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+				    int qpn, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan     = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+		.priority = MLX4_DOMAIN_NIC,
+	};
+
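+	/* MLX4_MAC_MASK covers the low 48 bits; shifted up by 16 and
+	 * converted to big endian, the first ETH_ALEN bytes become 0xff,
+	 * i.e. an exact-match mask for the outer destination MAC below.
+	 */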
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	rule.port = priv->port;
+	rule.qpn = qpn;
+	INIT_LIST_HEAD(&rule.list);
+
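+	/* Build a three-level match: outer Ethernet with a specific
+	 * destination MAC, any VXLAN header, any inner Ethernet header.
+	 */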
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
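+	/* On success *reg_id holds the registration id of the attached
+	 * rule; callers save it so the rule can be detached later.
+	 */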
+	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	if (err) {
+		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+		return err;
+	}
+	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+	return 0;
+}
+
+
 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 				unsigned char *mac, int *qpn, u64 *reg_id)
 {
@@ -585,6 +632,10 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	if (err)
 		goto steer_err;
 
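+	/* Steer VXLAN traffic addressed to this netdev's unicast MAC to
+	 * the same QP; the helper is a no-op when VXLAN offload mode is
+	 * not enabled, so this is safe on all devices.
+	 */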
+	if (mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+				     &priv->tunnel_reg_id))
+		goto tunnel_err;
+
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
 		err = -ENOMEM;
@@ -599,6 +650,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	return 0;
 
 alloc_err:
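+	/* Unwind in reverse order: drop the tunnel steering rule, if one
+	 * was attached, before releasing the unicast steering entry.
+	 */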
+	if (priv->tunnel_reg_id)
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
 	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
 
 steer_err:
@@ -642,6 +696,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 		}
 	}
 
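+	/* Detach the unicast VXLAN steering rule added in
+	 * mlx4_en_get_qp() and clear the saved registration id.
+	 */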
+	if (priv->tunnel_reg_id) {
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+		priv->tunnel_reg_id = 0;
+	}
+
 	en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
 	       priv->port, qpn);
 	mlx4_qp_release_range(dev, qpn, 1);
@@ -1044,6 +1103,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 			if (err)
 				en_err(priv, "Fail to detach multicast address\n");
 
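+			/* Also drop the per-address VXLAN steering rule,
+			 * if one was attached for this multicast entry.
+			 */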
+			if (mclist->tunnel_reg_id) {
+				err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+				if (err)
+					en_err(priv, "Failed to detach multicast address\n");
+			}
+
 			/* remove from list */
 			list_del(&mclist->list);
 			kfree(mclist);
@@ -1061,6 +1126,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 			if (err)
 				en_err(priv, "Fail to attach multicast address\n");
 
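+			/* mc_list holds the 16-byte multicast GID; its last
+			 * six bytes carry the group MAC, hence &mc_list[10].
+			 */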
+			err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+						       &mclist->tunnel_reg_id);
+			if (err)
+				en_err(priv, "Failed to attach multicast address\n");
 		}
 	}
 }
@@ -1598,6 +1667,15 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto tx_err;
 	}
 
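+	/* Have the device steer incoming VXLAN traffic by the outer MAC
+	 * before the port is brought up.
+	 */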
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto tx_err;
+		}
+	}
+
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -2400,6 +2478,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
 		dev->priv_flags |= IFF_UNICAST_FLT;
 
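+	/* hw_enc_features advertises the offloads the stack may use on
+	 * the inner headers of encapsulated packets; UDP tunnel
+	 * segmentation is also exposed as a regular netdev feature.
+	 */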
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+	}
+
 	mdev->pndev[port] = dev;
 
 	netif_carrier_off(dev);
@@ -2429,6 +2514,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		goto out;
 	}
 
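+	/* Same L2 tunnel configuration as in mlx4_en_start_port(), applied
+	 * at netdev init time as well, ahead of the initial INIT_PORT.
+	 */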
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto out;
+		}
+	}
+
 	/* Init port */
 	en_warn(priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);