@@ -817,10 +817,25 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
817
817
return len ;
818
818
}
819
819
820
+ /*
821
+ * Return an IU and possible credit to the free pool
822
+ */
823
+ static void srp_put_tx_iu (struct srp_target_port * target , struct srp_iu * iu ,
824
+ enum srp_iu_type iu_type )
825
+ {
826
+ unsigned long flags ;
827
+
828
+ spin_lock_irqsave (target -> scsi_host -> host_lock , flags );
829
+ list_add (& iu -> list , & target -> free_tx );
830
+ if (iu_type != SRP_IU_RSP )
831
+ ++ target -> req_lim ;
832
+ spin_unlock_irqrestore (target -> scsi_host -> host_lock , flags );
833
+ }
834
+
820
835
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and free_tx.  If IU is not sent, it must be returned using
 * srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
@@ -843,26 +858,25 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
843
858
return NULL ;
844
859
845
860
/* Initiator responses to target requests do not consume credits */
846
- if (target -> req_lim <= rsv && iu_type != SRP_IU_RSP ) {
847
- ++ target -> zero_req_lim ;
848
- return NULL ;
861
+ if (iu_type != SRP_IU_RSP ) {
862
+ if (target -> req_lim <= rsv ) {
863
+ ++ target -> zero_req_lim ;
864
+ return NULL ;
865
+ }
866
+
867
+ -- target -> req_lim ;
849
868
}
850
869
851
870
iu = list_first_entry (& target -> free_tx , struct srp_iu , list );
852
- iu -> type = iu_type ;
871
+ list_del ( & iu -> list ) ;
853
872
return iu ;
854
873
}
855
874
856
- /*
857
- * Must be called with target->scsi_host->host_lock held to protect
858
- * req_lim and free_tx.
859
- */
860
- static int __srp_post_send (struct srp_target_port * target ,
861
- struct srp_iu * iu , int len )
875
+ static int srp_post_send (struct srp_target_port * target ,
876
+ struct srp_iu * iu , int len )
862
877
{
863
878
struct ib_sge list ;
864
879
struct ib_send_wr wr , * bad_wr ;
865
- int ret = 0 ;
866
880
867
881
list .addr = iu -> dma ;
868
882
list .length = len ;
@@ -875,15 +889,7 @@ static int __srp_post_send(struct srp_target_port *target,
875
889
wr .opcode = IB_WR_SEND ;
876
890
wr .send_flags = IB_SEND_SIGNALED ;
877
891
878
- ret = ib_post_send (target -> qp , & wr , & bad_wr );
879
-
880
- if (!ret ) {
881
- list_del (& iu -> list );
882
- if (iu -> type != SRP_IU_RSP )
883
- -- target -> req_lim ;
884
- }
885
-
886
- return ret ;
892
+ return ib_post_send (target -> qp , & wr , & bad_wr );
887
893
}
888
894
889
895
static int srp_post_recv (struct srp_target_port * target , struct srp_iu * iu )
@@ -953,34 +959,33 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
953
959
static int srp_response_common (struct srp_target_port * target , s32 req_delta ,
954
960
void * rsp , int len )
955
961
{
956
- struct ib_device * dev ;
962
+ struct ib_device * dev = target -> srp_host -> srp_dev -> dev ;
957
963
unsigned long flags ;
958
964
struct srp_iu * iu ;
959
- int err = 1 ;
960
-
961
- dev = target -> srp_host -> srp_dev -> dev ;
965
+ int err ;
962
966
963
967
spin_lock_irqsave (target -> scsi_host -> host_lock , flags );
964
968
target -> req_lim += req_delta ;
965
-
966
969
iu = __srp_get_tx_iu (target , SRP_IU_RSP );
970
+ spin_unlock_irqrestore (target -> scsi_host -> host_lock , flags );
971
+
967
972
if (!iu ) {
968
973
shost_printk (KERN_ERR , target -> scsi_host , PFX
969
974
"no IU available to send response\n" );
970
- goto out ;
975
+ return 1 ;
971
976
}
972
977
973
978
ib_dma_sync_single_for_cpu (dev , iu -> dma , len , DMA_TO_DEVICE );
974
979
memcpy (iu -> buf , rsp , len );
975
980
ib_dma_sync_single_for_device (dev , iu -> dma , len , DMA_TO_DEVICE );
976
981
977
- err = __srp_post_send (target , iu , len );
978
- if (err )
982
+ err = srp_post_send (target , iu , len );
983
+ if (err ) {
979
984
shost_printk (KERN_ERR , target -> scsi_host , PFX
980
985
"unable to post response: %d\n" , err );
986
+ srp_put_tx_iu (target , iu , SRP_IU_RSP );
987
+ }
981
988
982
- out :
983
- spin_unlock_irqrestore (target -> scsi_host -> host_lock , flags );
984
989
return err ;
985
990
}
986
991
@@ -1107,14 +1112,14 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1107
1112
}
1108
1113
}
1109
1114
1110
- static int srp_queuecommand_lck (struct scsi_cmnd * scmnd ,
1111
- void (* done )(struct scsi_cmnd * ))
1115
+ static int srp_queuecommand (struct Scsi_Host * shost , struct scsi_cmnd * scmnd )
1112
1116
{
1113
- struct srp_target_port * target = host_to_target (scmnd -> device -> host );
1117
+ struct srp_target_port * target = host_to_target (shost );
1114
1118
struct srp_request * req ;
1115
1119
struct srp_iu * iu ;
1116
1120
struct srp_cmd * cmd ;
1117
1121
struct ib_device * dev ;
1122
+ unsigned long flags ;
1118
1123
int len ;
1119
1124
1120
1125
if (target -> state == SRP_TARGET_CONNECTING )
@@ -1123,21 +1128,26 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
1123
1128
if (target -> state == SRP_TARGET_DEAD ||
1124
1129
target -> state == SRP_TARGET_REMOVED ) {
1125
1130
scmnd -> result = DID_BAD_TARGET << 16 ;
1126
- done (scmnd );
1131
+ scmnd -> scsi_done (scmnd );
1127
1132
return 0 ;
1128
1133
}
1129
1134
1135
+ spin_lock_irqsave (shost -> host_lock , flags );
1130
1136
iu = __srp_get_tx_iu (target , SRP_IU_CMD );
1137
+ if (iu ) {
1138
+ req = list_first_entry (& target -> free_reqs , struct srp_request ,
1139
+ list );
1140
+ list_del (& req -> list );
1141
+ }
1142
+ spin_unlock_irqrestore (shost -> host_lock , flags );
1143
+
1131
1144
if (!iu )
1132
1145
goto err ;
1133
1146
1134
1147
dev = target -> srp_host -> srp_dev -> dev ;
1135
1148
ib_dma_sync_single_for_cpu (dev , iu -> dma , srp_max_iu_len ,
1136
1149
DMA_TO_DEVICE );
1137
1150
1138
- req = list_first_entry (& target -> free_reqs , struct srp_request , list );
1139
-
1140
- scmnd -> scsi_done = done ;
1141
1151
scmnd -> result = 0 ;
1142
1152
scmnd -> host_scribble = (void * ) req ;
1143
1153
@@ -1156,30 +1166,33 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
1156
1166
if (len < 0 ) {
1157
1167
shost_printk (KERN_ERR , target -> scsi_host ,
1158
1168
PFX "Failed to map data\n" );
1159
- goto err ;
1169
+ goto err_iu ;
1160
1170
}
1161
1171
1162
1172
ib_dma_sync_single_for_device (dev , iu -> dma , srp_max_iu_len ,
1163
1173
DMA_TO_DEVICE );
1164
1174
1165
- if (__srp_post_send (target , iu , len )) {
1175
+ if (srp_post_send (target , iu , len )) {
1166
1176
shost_printk (KERN_ERR , target -> scsi_host , PFX "Send failed\n" );
1167
1177
goto err_unmap ;
1168
1178
}
1169
1179
1170
- list_del (& req -> list );
1171
-
1172
1180
return 0 ;
1173
1181
1174
1182
err_unmap :
1175
1183
srp_unmap_data (scmnd , target , req );
1176
1184
1185
+ err_iu :
1186
+ srp_put_tx_iu (target , iu , SRP_IU_CMD );
1187
+
1188
+ spin_lock_irqsave (shost -> host_lock , flags );
1189
+ list_add (& req -> list , & target -> free_reqs );
1190
+ spin_unlock_irqrestore (shost -> host_lock , flags );
1191
+
1177
1192
err :
1178
1193
return SCSI_MLQUEUE_HOST_BUSY ;
1179
1194
}
1180
1195
1181
- static DEF_SCSI_QCMD (srp_queuecommand )
1182
-
1183
1196
static int srp_alloc_iu_bufs (struct srp_target_port * target )
1184
1197
{
1185
1198
int i ;
@@ -1433,17 +1446,18 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
1433
1446
struct srp_iu * iu ;
1434
1447
struct srp_tsk_mgmt * tsk_mgmt ;
1435
1448
1436
- spin_lock_irq (target -> scsi_host -> host_lock );
1437
-
1438
1449
if (target -> state == SRP_TARGET_DEAD ||
1439
1450
target -> state == SRP_TARGET_REMOVED )
1440
- goto out ;
1451
+ return -1 ;
1441
1452
1442
1453
init_completion (& target -> tsk_mgmt_done );
1443
1454
1455
+ spin_lock_irq (target -> scsi_host -> host_lock );
1444
1456
iu = __srp_get_tx_iu (target , SRP_IU_TSK_MGMT );
1457
+ spin_unlock_irq (target -> scsi_host -> host_lock );
1458
+
1445
1459
if (!iu )
1446
- goto out ;
1460
+ return -1 ;
1447
1461
1448
1462
ib_dma_sync_single_for_cpu (dev , iu -> dma , sizeof * tsk_mgmt ,
1449
1463
DMA_TO_DEVICE );
@@ -1458,20 +1472,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
1458
1472
1459
1473
ib_dma_sync_single_for_device (dev , iu -> dma , sizeof * tsk_mgmt ,
1460
1474
DMA_TO_DEVICE );
1461
- if (__srp_post_send (target , iu , sizeof * tsk_mgmt ))
1462
- goto out ;
1463
-
1464
- spin_unlock_irq ( target -> scsi_host -> host_lock );
1475
+ if (srp_post_send (target , iu , sizeof * tsk_mgmt )) {
1476
+ srp_put_tx_iu ( target , iu , SRP_IU_TSK_MGMT ) ;
1477
+ return -1 ;
1478
+ }
1465
1479
1466
1480
if (!wait_for_completion_timeout (& target -> tsk_mgmt_done ,
1467
1481
msecs_to_jiffies (SRP_ABORT_TIMEOUT_MS )))
1468
1482
return -1 ;
1469
1483
1470
1484
return 0 ;
1471
-
1472
- out :
1473
- spin_unlock_irq (target -> scsi_host -> host_lock );
1474
- return -1 ;
1475
1485
}
1476
1486
1477
1487
static int srp_abort (struct scsi_cmnd * scmnd )
0 commit comments