@@ -459,6 +459,19 @@ static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
 		rdma_destroy_ah_attr(ah_attr);
 }
 
+static const struct ib_gid_attr *
+rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
+		      const struct ib_gid_attr *old_attr)
+{
+	if (old_attr)
+		rdma_put_gid_attr(old_attr);
+	if (ah_attr->ah_flags & IB_AH_GRH) {
+		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
+		return ah_attr->grh.sgid_attr;
+	}
+	return NULL;
+}
+
 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
 				     struct rdma_ah_attr *ah_attr,
 				     struct ib_udata *udata)
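
(Reading aid, not part of the patch.) The new rdma_update_sgid_attr() helper drops whatever reference the AH/QP held on its previous sgid_attr and takes a fresh one on the attribute the AH attribute now carries. Below is a minimal user-space sketch of that put-old/hold-new pattern; struct gid_attr and the hold/put helpers are illustrative stand-ins, not the kernel's ib_gid_attr API.

/*
 * Illustrative sketch only: a user-space model of the "put the old
 * reference, hold the new one" pattern used by rdma_update_sgid_attr().
 */
#include <stdio.h>
#include <stdlib.h>

struct gid_attr {
	int refcount;
	int index;
};

static struct gid_attr *gid_attr_hold(struct gid_attr *attr)
{
	attr->refcount++;
	return attr;
}

static void gid_attr_put(struct gid_attr *attr)
{
	if (--attr->refcount == 0)
		free(attr);
}

/* Mirror of the helper: drop the previous reference (if any) and take a
 * new one on the attribute currently selected for the AH/QP. */
static struct gid_attr *update_sgid_attr(struct gid_attr *new_attr,
					 struct gid_attr *old_attr)
{
	if (old_attr)
		gid_attr_put(old_attr);
	if (new_attr)
		return gid_attr_hold(new_attr);
	return NULL;
}

int main(void)
{
	struct gid_attr *a = calloc(1, sizeof(*a));
	struct gid_attr *b = calloc(1, sizeof(*b));
	struct gid_attr *cached = NULL;

	a->refcount = 1;	/* reference owned by the "GID table" */
	b->refcount = 1;
	a->index = 0;
	b->index = 1;

	cached = update_sgid_attr(a, cached);	/* create: hold a */
	cached = update_sgid_attr(b, cached);	/* modify: put a, hold b */
	printf("cached index %d, refcount %d\n", cached->index, cached->refcount);

	update_sgid_attr(NULL, cached);		/* destroy: put b */
	gid_attr_put(a);
	gid_attr_put(b);
	return 0;
}

Compiled stand-alone, main() walks the same create -> modify -> destroy lifetime the patch gives an ib_ah.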
@@ -472,6 +485,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
 		ah->pd      = pd;
 		ah->uobject = NULL;
 		ah->type    = ah_attr->type;
+		ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+
 		atomic_inc(&pd->usecnt);
 	}
 
@@ -871,6 +886,7 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 		ah->device->modify_ah(ah, ah_attr) :
 		-EOPNOTSUPP;
 
+	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 	return ret;
 }
@@ -888,13 +904,17 @@ EXPORT_SYMBOL(rdma_query_ah);
 
 int rdma_destroy_ah(struct ib_ah *ah)
 {
+	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
 	struct ib_pd *pd;
 	int ret;
 
 	pd = ah->pd;
 	ret = ah->device->destroy_ah(ah);
-	if (!ret)
+	if (!ret) {
 		atomic_dec(&pd->usecnt);
+		if (sgid_attr)
+			rdma_put_gid_attr(sgid_attr);
+	}
 
 	return ret;
 }
@@ -1573,6 +1593,13 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			return ret;
 	}
 	if (attr_mask & IB_QP_ALT_PATH) {
+		/*
+		 * FIXME: This does not track the migration state, so if the
+		 * user loads a new alternate path after the HW has migrated
+		 * from primary->alternate we will keep the wrong
+		 * references. This is OK for IB because the reference
+		 * counting does not serve any functional purpose.
+		 */
 		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
 					  &old_sgid_attr_alt_av);
 		if (ret)
@@ -1606,8 +1633,17 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 	}
 
 	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
-	if (!ret && (attr_mask & IB_QP_PORT))
+	if (ret)
+		goto out;
+
+	if (attr_mask & IB_QP_PORT)
 		qp->port = attr->port_num;
+	if (attr_mask & IB_QP_AV)
+		qp->av_sgid_attr =
+			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
+	if (attr_mask & IB_QP_ALT_PATH)
+		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
+			&attr->alt_ah_attr, qp->alt_path_sgid_attr);
 
 out:
 	if (attr_mask & IB_QP_ALT_PATH)
@@ -1765,6 +1801,8 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
 
 int ib_destroy_qp(struct ib_qp *qp)
 {
+	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
+	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
 	struct ib_pd *pd;
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
@@ -1795,6 +1833,10 @@ int ib_destroy_qp(struct ib_qp *qp)
 	rdma_restrack_del(&qp->res);
 	ret = qp->device->destroy_qp(qp);
 	if (!ret) {
+		if (alt_path_sgid_attr)
+			rdma_put_gid_attr(alt_path_sgid_attr);
+		if (av_sgid_attr)
+			rdma_put_gid_attr(av_sgid_attr);
 		if (pd)
 			atomic_dec(&pd->usecnt);
 		if (scq)
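
(Reading aid, not part of the patch.) On the QP side the same helper refreshes qp->av_sgid_attr and qp->alt_path_sgid_attr when IB_QP_AV or IB_QP_ALT_PATH is modified, and ib_destroy_qp() snapshots both pointers into locals up front, then puts them only once the driver's destroy_qp() has succeeded. A toy version of that flow, reusing the gid_attr stand-ins from the sketch above (toy_qp, toy_modify_qp and toy_destroy_qp are hypothetical names, not kernel API):

/* Illustrative sketch only, built on the earlier gid_attr stand-ins. */
struct toy_qp {
	struct gid_attr *av_sgid_attr;		/* primary path, may be NULL */
	struct gid_attr *alt_path_sgid_attr;	/* alternate path, may be NULL */
};

/* Modify: refresh whichever cached attribute the caller is changing. */
static void toy_modify_qp(struct toy_qp *qp, struct gid_attr *new_av,
			  struct gid_attr *new_alt)
{
	if (new_av)
		qp->av_sgid_attr = update_sgid_attr(new_av, qp->av_sgid_attr);
	if (new_alt)
		qp->alt_path_sgid_attr =
			update_sgid_attr(new_alt, qp->alt_path_sgid_attr);
}

/* Destroy: snapshot the cached pointers before the underlying object is
 * torn down, then drop both references. */
static void toy_destroy_qp(struct toy_qp *qp)
{
	struct gid_attr *av = qp->av_sgid_attr;
	struct gid_attr *alt = qp->alt_path_sgid_attr;

	/* ... the driver's destroy would run here ... */
	if (alt)
		gid_attr_put(alt);
	if (av)
		gid_attr_put(av);
}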