Commit 76c75b2

bvanassche authored and dillow committed
IB/srp: reduce lock coverage for command submission and EH
We only need locks to protect our lists and the number of credits available. By pre-consuming the credit for the request, we can reduce our lock coverage to just those areas. If we don't actually send the request, we'll need to put the credit back into the pool.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out and small cleanups by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
1 parent 536ae14 commit 76c75b2
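To make the pattern in the description concrete, here is a minimal user-space sketch of the same idea, assuming a pthreads mutex in place of the SCSI host lock; the names tx_pool, get_tx_slot and put_tx_slot are hypothetical stand-ins for the driver's free_tx list, __srp_get_tx_iu() and srp_put_tx_iu(), not the driver code itself:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical analogue of the SRP free_tx list plus req_lim credit pool. */
struct tx_pool {
	pthread_mutex_t lock;
	int credits;      /* like target->req_lim          */
	int free_slots;   /* stand-in for the free_tx list */
};

/*
 * Reserve a slot and pre-consume a credit under the lock, in the spirit of
 * __srp_get_tx_iu() after this commit.  Returns 0 on success.
 */
static int get_tx_slot(struct tx_pool *p, int needs_credit)
{
	int ret = -1;

	pthread_mutex_lock(&p->lock);
	if (p->free_slots > 0 && (!needs_credit || p->credits > 0)) {
		p->free_slots--;
		if (needs_credit)
			p->credits--;   /* credit is consumed up front */
		ret = 0;
	}
	pthread_mutex_unlock(&p->lock);
	return ret;
}

/* Undo the reservation if the send never happened, like srp_put_tx_iu(). */
static void put_tx_slot(struct tx_pool *p, int had_credit)
{
	pthread_mutex_lock(&p->lock);
	p->free_slots++;
	if (had_credit)
		p->credits++;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct tx_pool pool = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };

	if (get_tx_slot(&pool, 1) == 0) {
		/* ... build and post the request without holding the lock ... */
		int send_failed = 1;            /* pretend the post failed */
		if (send_failed)
			put_tx_slot(&pool, 1);  /* return slot and credit */
	}
	printf("credits=%d free_slots=%d\n", pool.credits, pool.free_slots);
	return 0;
}

As in the commit, the actual posting of the request happens with the lock dropped; only the list and credit bookkeeping stay under it, and a failed post simply returns the reservation to the pool.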

File tree

2 files changed: +67 -58 lines changed

drivers/infiniband/ulp/srp/ib_srp.c

Lines changed: 67 additions & 57 deletions
@@ -817,10 +817,25 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
+/*
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+}
+
 /*
  * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx. Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * req_lim and free_tx. If IU is not sent, it must be returned using
+ * srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -843,26 +858,25 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
 	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
-	iu->type = iu_type;
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr = iu->dma;
 	list.length = len;
@@ -875,15 +889,7 @@ static int __srp_post_send(struct srp_target_port *target,
 	wr.opcode = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		list_del(&iu->list);
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
@@ -953,34 +959,33 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
-
-	dev = target->srp_host->srp_dev->dev;
+	int err;
 
 	spin_lock_irqsave(target->scsi_host->host_lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
 
@@ -1107,14 +1112,14 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-				void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1123,21 +1128,26 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(shost->host_lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				       list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
 	if (!iu)
 		goto err;
 
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done = done;
 	scmnd->result = 0;
 	scmnd->host_scribble = (void *) req;
 
@@ -1156,30 +1166,33 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_del(&req->list);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1433,17 +1446,18 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED)
-		goto out;
+		return -1;
 
 	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(target->scsi_host->host_lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(target->scsi_host->host_lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1458,20 +1472,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+		return -1;
+	}
 
 	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
 	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)

drivers/infiniband/ulp/srp/ib_srp.h

Lines changed: 0 additions & 1 deletion
@@ -168,7 +168,6 @@ struct srp_iu {
 	void *buf;
 	size_t size;
 	enum dma_data_direction direction;
-	enum srp_iu_type type;
 };
 
 #endif /* IB_SRP_H */
