@@ -152,7 +152,7 @@ void rpcrdma_event_process(struct ib_wc *wc)
 	dprintk("RPC:       %s: event rep %p status %X opcode %X length %u\n",
 		__func__, rep, wc->status, wc->opcode, wc->byte_len);
 
-	if (!rep) /* send or bind completion that we don't care about */
+	if (!rep) /* send completion that we don't care about */
 		return;
 
 	if (IB_WC_SUCCESS != wc->status) {
@@ -197,8 +197,6 @@ void rpcrdma_event_process(struct ib_wc *wc)
 			}
 			atomic_set(&rep->rr_buffer->rb_credits, credits);
 		}
-		/* fall through */
-	case IB_WC_BIND_MW:
 		rpcrdma_schedule_tasklet(rep);
 		break;
 	default:
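
With the `IB_WC_BIND_MW` case removed, receive completions are the only events this handler hands to the tasklet. The dispatch helper itself is outside this hunk; below is a minimal sketch of what it does, assuming a driver-global list and lock (the names `rpcrdma_tasklets_g`, `rpcrdma_tk_lock_g`, and `rpcrdma_tasklet_g` follow the driver's conventions but are not taken from this patch):

```c
/* Sketch: queue a reply for single-threaded processing.  One tasklet
 * drains the list, so replies are handled strictly in arrival order --
 * that ordering is what keeps the server credit accounting
 * (rb_credits, set above) coherent. */
static void rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}
```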
@@ -233,7 +231,7 @@ rpcrdma_cq_poll(struct ib_cq *cq)
 /*
  * rpcrdma_cq_event_upcall
  *
- * This upcall handles recv, send, bind and unbind events.
+ * This upcall handles recv and send events.
  * It is reentrant but processes single events in order to maintain
  * ordering of receives to keep server credits.
  *
@@ -494,16 +492,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 	}
 
 	switch (memreg) {
-	case RPCRDMA_MEMWINDOWS:
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-		if (!(devattr.device_cap_flags & IB_DEVICE_MEM_WINDOW)) {
-			dprintk("RPC:       %s: MEMWINDOWS registration "
-				"specified but not supported by adapter, "
-				"using slower RPCRDMA_REGISTER\n",
-				__func__);
-			memreg = RPCRDMA_REGISTER;
-		}
-		break;
 	case RPCRDMA_MTHCAFMR:
 		if (!ia->ri_id->device->alloc_fmr) {
 #if RPCRDMA_PERSISTENT_REGISTRATION
@@ -567,16 +555,13 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 				IB_ACCESS_REMOTE_READ;
 		goto register_setup;
 #endif
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		mem_priv = IB_ACCESS_LOCAL_WRITE |
-				IB_ACCESS_MW_BIND;
-		goto register_setup;
 	case RPCRDMA_MTHCAFMR:
 		if (ia->ri_have_dma_lkey)
 			break;
 		mem_priv = IB_ACCESS_LOCAL_WRITE;
+#if RPCRDMA_PERSISTENT_REGISTRATION
 	register_setup:
+#endif
 		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
 		if (IS_ERR(ia->ri_bind_mem)) {
 			printk(KERN_ALERT "%s: ib_get_dma_mr for "
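
The new `#if`/`#endif` around `register_setup:` is needed because the deleted `RPCRDMA_MEMWINDOWS*` cases were one of only two users of the label. The remaining user, the `RPCRDMA_ALLPHYSICAL` case, is itself guarded by `RPCRDMA_PERSISTENT_REGISTRATION`; when that is compiled out, nothing jumps to the label (`RPCRDMA_MTHCAFMR` reaches `ib_get_dma_mr()` by falling through), and gcc would warn about it. A standalone illustration of the warning this avoids:

```c
/* Compile with -Wall: if the goto is removed (or compiled out by an
 * #if) while the label stays, gcc reports "label 'out' defined but
 * not used" -- hence the label is guarded along with its only user. */
int clamp_nonzero(int x)
{
	if (x)
		goto out;
	x = 1;
out:
	return x;
}
```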
@@ -699,14 +684,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 		}
 		break;
 	}
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		/* Add room for mw_binds+unbinds - overkill! */
-		ep->rep_attr.cap.max_send_wr++;
-		ep->rep_attr.cap.max_send_wr *= (2 * RPCRDMA_MAX_SEGS);
-		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
-			return -EINVAL;
-		break;
 	default:
 		break;
 	}
@@ -728,26 +705,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
 	/* set trigger for requesting send completion */
 	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 /* - 1*/;
-	switch (ia->ri_memreg_strategy) {
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		ep->rep_cqinit -= RPCRDMA_MAX_SEGS;
-		break;
-	default:
-		break;
-	}
 	if (ep->rep_cqinit <= 2)
 		ep->rep_cqinit = 0;
 	INIT_CQCOUNT(ep);
 	ep->rep_ia = ia;
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
-	/*
-	 * Create a single cq for receive dto and mw_bind (only ever
-	 * care about unbind, really). Send completions are suppressed.
-	 * Use single threaded tasklet upcalls to maintain ordering.
-	 */
 	ep->rep_cq = ib_create_cq(ia->ri_id->device, rpcrdma_cq_event_upcall,
 				  rpcrdma_cq_async_error_upcall, NULL,
 				  ep->rep_attr.cap.max_recv_wr +
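
For context: `rep_cqinit` is the trigger for requesting a signaled send completion. Most sends are posted unsignaled, and roughly every `max_send_wr/2` posts one completion is requested so the send queue can be reaped. The memory-window modes had to back this trigger off by `RPCRDMA_MAX_SEGS` (the deleted `switch`) because each bind/unbind also consumed send-queue slots; with those modes gone, the adjustment goes with them. The counter macros live in `xprt_rdma.h`; the sketch below is an approximation of that pattern, not text from this patch:

```c
/* Approximation of the send-completion suppression counters used with
 * ep->rep_cqinit.  DECR_CQCOUNT()'s return value tells the caller when
 * to set IB_SEND_SIGNALED on the next work request it posts. */
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
```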
@@ -1020,11 +984,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
 		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
 				sizeof(struct rpcrdma_mw);
 		break;
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
-				sizeof(struct rpcrdma_mw);
-		break;
 	default:
 		break;
 	}
@@ -1055,11 +1014,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
 	}
 	p += cdata->padding;
 
-	/*
-	 * Allocate the fmr's, or mw's for mw_bind chunk registration.
-	 * We "cycle" the mw's in order to minimize rkey reuse,
-	 * and also reduce unbind-to-bind collision.
-	 */
 	INIT_LIST_HEAD(&buf->rb_mws);
 	r = (struct rpcrdma_mw *)p;
 	switch (ia->ri_memreg_strategy) {
@@ -1107,21 +1061,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
 			++r;
 		}
 		break;
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		/* Allocate one extra request's worth, for full cycling */
-		for (i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; i; i--) {
-			r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1);
-			if (IS_ERR(r->r.mw)) {
-				rc = PTR_ERR(r->r.mw);
-				dprintk("RPC:       %s: ib_alloc_mw"
-					" failed %i\n", __func__, rc);
-				goto out;
-			}
-			list_add(&r->mw_list, &buf->rb_mws);
-			++r;
-		}
-		break;
 	default:
 		break;
 	}
@@ -1170,7 +1109,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
 		memset(rep, 0, sizeof(struct rpcrdma_rep));
 		buf->rb_recv_bufs[i] = rep;
 		buf->rb_recv_bufs[i]->rr_buffer = buf;
-		init_waitqueue_head(&rep->rr_unbind);
 
 		rc = rpcrdma_register_internal(ia, rep->rr_base,
 				len - offsetof(struct rpcrdma_rep, rr_base),
@@ -1204,7 +1142,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 
 	/* clean up in reverse order from create
 	 *   1.  recv mr memory (mr free, then kfree)
-	 *   1a. bind mw memory
 	 *   2.  send mr memory (mr free, then kfree)
 	 *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
 	 *   4.  arrays
@@ -1248,15 +1185,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 						" failed %i\n",
 						__func__, rc);
 				break;
-			case RPCRDMA_MEMWINDOWS_ASYNC:
-			case RPCRDMA_MEMWINDOWS:
-				rc = ib_dealloc_mw(r->r.mw);
-				if (rc)
-					dprintk("RPC:       %s:"
-						" ib_dealloc_mw"
-						" failed %i\n",
-						__func__, rc);
-				break;
 			default:
 				break;
 			}
@@ -1331,15 +1259,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 	req->rl_niovs = 0;
 	if (req->rl_reply) {
 		buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
-		init_waitqueue_head(&req->rl_reply->rr_unbind);
 		req->rl_reply->rr_func = NULL;
 		req->rl_reply = NULL;
 	}
 	switch (ia->ri_memreg_strategy) {
 	case RPCRDMA_FRMR:
 	case RPCRDMA_MTHCAFMR:
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
 		/*
 		 * Cycle mw's back in reverse order, and "spin" them.
 		 * This delays and scrambles reuse as much as possible.
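
The "cycle and spin" comment in the context above describes FIFO reuse of `rb_mws`: entries are returned at one end of the list and taken from the other, so an rkey sits idle as long as possible before being handed out again. A hypothetical sketch of that discipline (the helper names are invented for illustration; only the `mw_list`/`rb_mws` linkage appears in this patch):

```c
/* Hypothetical helpers illustrating FIFO cycling of rb_mws. */
static void example_mw_put(struct rpcrdma_buffer *buf, struct rpcrdma_mw *mw)
{
	list_add_tail(&mw->mw_list, &buf->rb_mws);	/* return at tail */
}

static struct rpcrdma_mw *example_mw_get(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *mw;

	mw = list_first_entry(&buf->rb_mws, struct rpcrdma_mw, mw_list);
	list_del(&mw->mw_list);				/* take from head */
	return mw;
}
```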
@@ -1384,8 +1309,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 
 /*
  * Put reply buffers back into pool when not attached to
- * request. This happens in error conditions, and when
- * aborting unbinds. Pre-decrement counter/array index.
+ * request. This happens in error conditions.
  */
 void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
@@ -1687,74 +1611,6 @@ rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
 	return rc;
 }
 
-static int
-rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
-			int *nsegs, int writing, struct rpcrdma_ia *ia,
-			struct rpcrdma_xprt *r_xprt)
-{
-	int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
-				  IB_ACCESS_REMOTE_READ);
-	struct ib_mw_bind param;
-	int rc;
-
-	*nsegs = 1;
-	rpcrdma_map_one(ia, seg, writing);
-	param.bind_info.mr = ia->ri_bind_mem;
-	param.wr_id = 0ULL;	/* no send cookie */
-	param.bind_info.addr = seg->mr_dma;
-	param.bind_info.length = seg->mr_len;
-	param.send_flags = 0;
-	param.bind_info.mw_access_flags = mem_priv;
-
-	DECR_CQCOUNT(&r_xprt->rx_ep);
-	rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
-	if (rc) {
-		dprintk("RPC:       %s: failed ib_bind_mw "
-			"%u@0x%llx status %i\n",
-			__func__, seg->mr_len,
-			(unsigned long long)seg->mr_dma, rc);
-		rpcrdma_unmap_one(ia, seg);
-	} else {
-		seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
-		seg->mr_base = param.bind_info.addr;
-		seg->mr_nsegs = 1;
-	}
-	return rc;
-}
-
-static int
-rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg,
-			struct rpcrdma_ia *ia,
-			struct rpcrdma_xprt *r_xprt, void **r)
-{
-	struct ib_mw_bind param;
-	LIST_HEAD(l);
-	int rc;
-
-	BUG_ON(seg->mr_nsegs != 1);
-	param.bind_info.mr = ia->ri_bind_mem;
-	param.bind_info.addr = 0ULL;	/* unbind */
-	param.bind_info.length = 0;
-	param.bind_info.mw_access_flags = 0;
-	if (*r) {
-		param.wr_id = (u64) (unsigned long) *r;
-		param.send_flags = IB_SEND_SIGNALED;
-		INIT_CQCOUNT(&r_xprt->rx_ep);
-	} else {
-		param.wr_id = 0ULL;
-		param.send_flags = 0;
-		DECR_CQCOUNT(&r_xprt->rx_ep);
-	}
-	rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
-	rpcrdma_unmap_one(ia, seg);
-	if (rc)
-		dprintk("RPC:       %s: failed ib_(un)bind_mw,"
-			" status %i\n", __func__, rc);
-	else
-		*r = NULL;	/* will upcall on completion */
-	return rc;
-}
-
 static int
 rpcrdma_register_default_external(struct rpcrdma_mr_seg *seg,
 			int *nsegs, int writing, struct rpcrdma_ia *ia)
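
The removed deregister path also explains the `rr_unbind` waitqueue and `IB_WC_BIND_MW` completion case deleted earlier: when the caller passed a cookie in `*r`, the unbind was posted `IB_SEND_SIGNALED` with `wr_id` carrying that cookie, and the completion upcall used it to find and wake the sleeper. A sketch of the consumer side as it plausibly looked before this patch (the wake-up detail is an inference from this diff, and the function name is an example, not quoted code):

```c
/* Pre-patch sketch: the unbind completion carries the cookie placed
 * in wr_id by rpcrdma_deregister_memwin_external(), and the tasklet
 * path ends in a wake_up() on rep->rr_unbind for the sleeping
 * deregister caller. */
static void example_handle_unbind(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep;

	rep = (struct rpcrdma_rep *)(unsigned long)wc->wr_id;
	if (wc->opcode == IB_WC_BIND_MW)
		rpcrdma_schedule_tasklet(rep);	/* wakes rep->rr_unbind */
}
```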
@@ -1845,12 +1701,6 @@ rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
 		rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
 		break;
 
-	/* Registration using memory windows */
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		rc = rpcrdma_register_memwin_external(seg, &nsegs, writing, ia, r_xprt);
-		break;
-
 	/* Default registration each time */
 	default:
 		rc = rpcrdma_register_default_external(seg, &nsegs, writing, ia);
@@ -1887,11 +1737,6 @@ rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
 		rc = rpcrdma_deregister_fmr_external(seg, ia);
 		break;
 
-	case RPCRDMA_MEMWINDOWS_ASYNC:
-	case RPCRDMA_MEMWINDOWS:
-		rc = rpcrdma_deregister_memwin_external(seg, ia, r_xprt, &r);
-		break;
-
 	default:
 		rc = rpcrdma_deregister_default_external(seg, ia);
 		break;