
Commit 8ba1ddb

Matthew R. Ochs authored and martinkpetersen committed
scsi: cxlflash: Update TMF command processing
Currently, the SCSI command presented to the device reset handler is used to send TMFs to the AFU for a device reset. This behavior is incorrect as the command presented is an actual command and not a special notification. As such, it should only be used for reference and not be acted upon.

Additionally, the existing TMF transmission routine does not account for actual errors from the hardware, only reflecting failure when a timeout occurs. This can lead to a condition where the device reset handler is presented with a false 'success'.

Update send_tmf() to dynamically allocate a private command for sending the TMF command and properly reflect failure when the completed command indicates an error or was aborted. Detect TMF commands during response processing and avoid scsi_done() for these types of commands. Lastly, update comments in the TMF processing paths to describe the new behavior.

Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1 parent 479ad8e commit 8ba1ddb
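
As a point of reference, here is a minimal userspace sketch of the over-allocate-and-align pattern the patch adopts in send_tmf() (kzalloc() of sizeof(*cmd) + __alignof__(*cmd) - 1 bytes followed by PTR_ALIGN()), together with the new mapping of aborted and failed completions to error codes. The struct tmf_cmd type, the align_up() helper, and the 64-byte alignment are illustrative assumptions, not part of the driver; only the pattern mirrors the diff below.

/* Userspace illustration of the private, aligned command allocation and
 * the error mapping that send_tmf() adopts in this commit.  Build with:
 * gcc -Wall sketch.c -o sketch */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct afu_cmd; the real structure is larger. */
struct tmf_cmd {
	uint64_t tmf;
	uint32_t ioasc;      /* adapter completion status */
	int aborted;         /* set when the command is flushed */
} __attribute__((aligned(64)));

/* Round ptr up to the next multiple of align (a power of two), mirroring
 * what PTR_ALIGN() does in the kernel. */
static void *align_up(void *ptr, size_t align)
{
	return (void *)(((uintptr_t)ptr + align - 1) & ~(uintptr_t)(align - 1));
}

int main(void)
{
	size_t align = __alignof__(struct tmf_cmd);

	/* Over-allocate by align - 1 so an aligned object always fits,
	 * as send_tmf() now does with kzalloc(). */
	char *buf = calloc(1, sizeof(struct tmf_cmd) + align - 1);
	if (!buf)
		return ENOMEM;

	struct tmf_cmd *cmd = align_up(buf, align);
	printf("buf=%p cmd=%p aligned=%s\n", (void *)buf, (void *)cmd,
	       ((uintptr_t)cmd % align == 0) ? "yes" : "no");

	/* Map the completion state to an errno the way the patch does:
	 * aborted -> -EAGAIN, adapter error (ioasc set) -> -EIO. */
	int rc = 0;
	if (cmd->aborted)
		rc = -EAGAIN;
	else if (cmd->ioasc)
		rc = -EIO;
	printf("rc=%d\n", rc);

	free(buf);           /* free the original pointer, not the aligned one */
	return 0;
}

Note that kfree()/free() must be given the original allocation pointer (buf), not the aligned command pointer, which is why send_tmf() keeps buf around until the out: label.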

File tree

1 file changed: +44 -21 lines


drivers/scsi/cxlflash/main.c

Lines changed: 44 additions & 21 deletions
@@ -155,9 +155,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
  * cmd_complete() - command completion handler
  * @cmd: AFU command that has completed.
  *
- * Prepares and submits command that has either completed or timed out to
- * the SCSI stack. Checks AFU command back into command pool for non-internal
- * (cmd->scp populated) commands.
+ * For SCSI commands this routine prepares and submits commands that have
+ * either completed or timed out to the SCSI stack. For internal commands
+ * (TMF or AFU), this routine simply notifies the originator that the
+ * command has completed.
  */
 static void cmd_complete(struct afu_cmd *cmd)
 {
@@ -167,7 +168,6 @@ static void cmd_complete(struct afu_cmd *cmd)
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
 	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-	bool cmd_is_tmf;
 
 	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 	list_del(&cmd->list);
@@ -180,19 +180,14 @@ static void cmd_complete(struct afu_cmd *cmd)
 		else
 			scp->result = (DID_OK << 16);
 
-		cmd_is_tmf = cmd->cmd_tmf;
-
 		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
 				    __func__, scp, scp->result, cmd->sa.ioasc);
-
 		scp->scsi_done(scp);
-
-		if (cmd_is_tmf) {
-			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
-			cfg->tmf_active = false;
-			wake_up_all_locked(&cfg->tmf_waitq);
-			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-		}
+	} else if (cmd->cmd_tmf) {
+		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+		cfg->tmf_active = false;
+		wake_up_all_locked(&cfg->tmf_waitq);
+		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 	} else
 		complete(&cmd->cevent);
 }
@@ -206,8 +201,10 @@ static void cmd_complete(struct afu_cmd *cmd)
  */
 static void flush_pending_cmds(struct hwq *hwq)
 {
+	struct cxlflash_cfg *cfg = hwq->afu->parent;
 	struct afu_cmd *cmd, *tmp;
 	struct scsi_cmnd *scp;
+	ulong lock_flags;
 
 	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
 		/* Bypass command when on a doneq, cmd_complete() will handle */
@@ -222,7 +219,15 @@ static void flush_pending_cmds(struct hwq *hwq)
 			scp->scsi_done(scp);
 		} else {
 			cmd->cmd_aborted = true;
-			complete(&cmd->cevent);
+
+			if (cmd->cmd_tmf) {
+				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+				cfg->tmf_active = false;
+				wake_up_all_locked(&cfg->tmf_waitq);
+				spin_unlock_irqrestore(&cfg->tmf_slock,
+						       lock_flags);
+			} else
+				complete(&cmd->cevent);
 		}
 	}
 }
@@ -455,24 +460,35 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
 /**
  * send_tmf() - sends a Task Management Function (TMF)
  * @afu: AFU to checkout from.
- * @scp: SCSI command from stack.
+ * @scp: SCSI command from stack describing target.
  * @tmfcmd: TMF command to send.
  *
  * Return:
- *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
  */
 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 {
 	struct Scsi_Host *host = scp->device->host;
 	struct cxlflash_cfg *cfg = shost_priv(host);
-	struct afu_cmd *cmd = sc_to_afucz(scp);
+	struct afu_cmd *cmd = NULL;
 	struct device *dev = &cfg->dev->dev;
 	int hwq_index = cmd_to_target_hwq(host, scp, afu);
 	struct hwq *hwq = get_hwq(afu, hwq_index);
+	char *buf = NULL;
 	ulong lock_flags;
 	int rc = 0;
 	ulong to;
 
+	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		dev_err(dev, "%s: no memory for command\n", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+	INIT_LIST_HEAD(&cmd->queue);
+
 	/* When Task Management Function is active do not send another */
 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
@@ -482,7 +498,6 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 	cfg->tmf_active = true;
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 
-	cmd->scp = scp;
 	cmd->parent = afu;
 	cmd->cmd_tmf = true;
 	cmd->hwq_index = hwq_index;
@@ -511,12 +526,20 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 						       cfg->tmf_slock,
 						       to);
 	if (!to) {
-		cfg->tmf_active = false;
 		dev_err(dev, "%s: TMF timed out\n", __func__);
-		rc = -1;
+		rc = -ETIMEDOUT;
+	} else if (cmd->cmd_aborted) {
+		dev_err(dev, "%s: TMF aborted\n", __func__);
+		rc = -EAGAIN;
+	} else if (cmd->sa.ioasc) {
+		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
+			__func__, cmd->sa.ioasc);
+		rc = -EIO;
 	}
+	cfg->tmf_active = false;
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 out:
+	kfree(buf);
 	return rc;
 }
 
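
For the synchronization side, here is a small pthread-based sketch of the single-in-flight TMF gate that the driver implements with cfg->tmf_slock, cfg->tmf_active and cfg->tmf_waitq, and which both cmd_complete() and flush_pending_cmds() now clear. The pthread primitives and function names are illustrative stand-ins for the kernel's spinlock and waitqueue APIs, not part of the driver.

/* Userspace sketch of the single-in-flight TMF gate.  Build with:
 * gcc -Wall -pthread gate.c -o gate */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tmf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t tmf_waitq = PTHREAD_COND_INITIALIZER;
static bool tmf_active;

/* Analogue of the send path: claim the gate so only one TMF is in flight. */
static int send_tmf_sketch(void)
{
	pthread_mutex_lock(&tmf_lock);
	while (tmf_active)                  /* wait if a TMF is already active */
		pthread_cond_wait(&tmf_waitq, &tmf_lock);
	tmf_active = true;
	pthread_mutex_unlock(&tmf_lock);

	/* ... the hardware would complete the command asynchronously ... */
	return 0;
}

/* Analogue of the completion/flush paths in the patch: clear the flag and
 * wake all waiters while holding the lock. */
static void complete_tmf_sketch(void)
{
	pthread_mutex_lock(&tmf_lock);
	tmf_active = false;
	pthread_cond_broadcast(&tmf_waitq);
	pthread_mutex_unlock(&tmf_lock);
}

int main(void)
{
	send_tmf_sketch();
	complete_tmf_sketch();
	puts("TMF gate cycled");
	return 0;
}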
