@@ -139,6 +139,38 @@ static void stop_ep_timer(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
+int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
+{
+	int error = 0;
+	struct cxio_rdev *rdev;
+
+	rdev = (struct cxio_rdev *)tdev->ulp;
+	if (cxio_fatal_error(rdev)) {
+		kfree_skb(skb);
+		return -EIO;
+	}
+	error = l2t_send(tdev, skb, l2e);
+	if (error)
+		kfree_skb(skb);
+	return error;
+}
+
+int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+{
+	int error = 0;
+	struct cxio_rdev *rdev;
+
+	rdev = (struct cxio_rdev *)tdev->ulp;
+	if (cxio_fatal_error(rdev)) {
+		kfree_skb(skb);
+		return -EIO;
+	}
+	error = cxgb3_ofld_send(tdev, skb);
+	if (error)
+		kfree_skb(skb);
+	return error;
+}
+
 static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 {
 	struct cpl_tid_release *req;
@@ -150,7 +182,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 	skb->priority = CPL_PRIORITY_SETUP;
-	cxgb3_ofld_send(tdev, skb);
+	iwch_cxgb3_ofld_send(tdev, skb);
 	return;
 }
 
@@ -172,8 +204,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
 	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
 
 	skb->priority = CPL_PRIORITY_DATA;
-	cxgb3_ofld_send(ep->com.tdev, skb);
-	return 0;
+	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
 }
 
 int iwch_resume_tid(struct iwch_ep *ep)
@@ -194,8 +225,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 	req->val = 0;
 
 	skb->priority = CPL_PRIORITY_DATA;
-	cxgb3_ofld_send(ep->com.tdev, skb);
-	return 0;
+	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
 }
 
 static void set_emss(struct iwch_ep *ep, u16 opt)
@@ -382,7 +412,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 
 	PDBG("%s t3cdev %p\n", __func__, dev);
 	req->cmd = CPL_ABORT_NO_RST;
-	cxgb3_ofld_send(dev, skb);
+	iwch_cxgb3_ofld_send(dev, skb);
 }
 
 static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
@@ -402,8 +432,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
-	l2t_send(ep->com.tdev, skb, ep->l2t);
-	return 0;
+	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 }
 
 static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
@@ -424,8 +453,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 	req->cmd = CPL_ABORT_SEND_RST;
-	l2t_send(ep->com.tdev, skb, ep->l2t);
-	return 0;
+	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 }
 
 static int send_connect(struct iwch_ep *ep)
@@ -469,8 +497,7 @@ static int send_connect(struct iwch_ep *ep)
 	req->opt0l = htonl(opt0l);
 	req->params = 0;
 	req->opt2 = htonl(opt2);
-	l2t_send(ep->com.tdev, skb, ep->l2t);
-	return 0;
+	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 }
 
 static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
@@ -527,7 +554,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	req->sndseq = htonl(ep->snd_seq);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
-	l2t_send(ep->com.tdev, skb, ep->l2t);
+	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 	start_ep_timer(ep);
 	state_set(&ep->com, MPA_REQ_SENT);
 	return;
@@ -578,8 +605,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	req->sndseq = htonl(ep->snd_seq);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
-	l2t_send(ep->com.tdev, skb, ep->l2t);
-	return 0;
+	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 }
 
 static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
@@ -630,8 +656,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	req->sndseq = htonl(ep->snd_seq);
 	ep->mpa_skb = skb;
 	state_set(&ep->com, MPA_REP_SENT);
-	l2t_send(ep->com.tdev, skb, ep->l2t);
-	return 0;
+	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 }
 
 static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -795,7 +820,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
 	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
 	skb->priority = CPL_PRIORITY_ACK;
-	cxgb3_ofld_send(ep->com.tdev, skb);
+	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
 	return credits;
 }
 
@@ -1203,8 +1228,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
 
 	skb->priority = 1;
-	cxgb3_ofld_send(ep->com.tdev, skb);
-	return 0;
+	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
 }
 
 static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
@@ -1237,8 +1261,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	req->cpu_idx = 0;
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
 	skb->priority = 1;
-	cxgb3_ofld_send(ep->com.tdev, skb);
-	return 0;
+	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
 }
 
 static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
@@ -1286,7 +1309,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 	rpl->opt2 = htonl(opt2);
 	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
 	skb->priority = CPL_PRIORITY_SETUP;
-	l2t_send(ep->com.tdev, skb, ep->l2t);
+	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
 
 	return;
 }
@@ -1315,7 +1338,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
 		rpl->opt2 = 0;
 		rpl->rsvd = rpl->opt2;
-		cxgb3_ofld_send(tdev, skb);
+		iwch_cxgb3_ofld_send(tdev, skb);
 	}
 }
 
@@ -1613,7 +1636,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
 	rpl->cmd = CPL_ABORT_NO_RST;
-	cxgb3_ofld_send(ep->com.tdev, rpl_skb);
+	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
 out:
 	if (release)
 		release_ep_resources(ep);
@@ -2017,8 +2040,11 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
 	ep->com.rpl_done = 0;
 	ep->com.rpl_err = 0;
 	err = listen_stop(ep);
+	if (err)
+		goto done;
 	wait_event(ep->com.waitq, ep->com.rpl_done);
 	cxgb3_free_stid(ep->com.tdev, ep->stid);
+done:
 	err = ep->com.rpl_err;
 	cm_id->rem_ref(cm_id);
 	put_ep(&ep->com);
@@ -2030,12 +2056,22 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
 	int ret = 0;
 	unsigned long flags;
 	int close = 0;
+	int fatal = 0;
+	struct t3cdev *tdev;
+	struct cxio_rdev *rdev;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
 
 	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
 	     states[ep->com.state], abrupt);
 
+	tdev = (struct t3cdev *)ep->com.tdev;
+	rdev = (struct cxio_rdev *)tdev->ulp;
+	if (cxio_fatal_error(rdev)) {
+		fatal = 1;
+		close_complete_upcall(ep);
+		ep->com.state = DEAD;
+	}
 	switch (ep->com.state) {
 	case MPA_REQ_WAIT:
 	case MPA_REQ_SENT:
@@ -2075,7 +2111,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
 			ret = send_abort(ep, NULL, gfp);
 		else
 			ret = send_halfclose(ep, gfp);
+		if (ret)
+			fatal = 1;
 	}
+	if (fatal)
+		release_ep_resources(ep);
 	return ret;
 }
 
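For reference, a minimal sketch of the caller-side pattern the hunks above converge on; send_example_cpl is a hypothetical helper that is not part of this patch, and the skb/CPL construction is elided. The point is only that, after this change, CPL senders hand the skb to the iwch_* wrapper and propagate its return value: the wrapper frees the skb and returns -EIO when the rdev is in a fatal (EEH) state, or frees it and returns the error when the underlying send fails.

/* Hypothetical caller, sketched against the wrappers added above. */
static int send_example_cpl(struct iwch_ep *ep, struct sk_buff *skb)
{
	skb->priority = CPL_PRIORITY_DATA;
	/* On a fatal rdev error the wrapper has already freed skb and returns -EIO. */
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}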