
Commit 602d674

sagigrimberg authored and axboe committed
nvme-tcp: support C2HData with SUCCESS flag
A C2HData PDU with the SUCCESS flag set indicates that the I/O was completed by the controller successfully and means that a subsequent completion response capsule PDU will be omitted.

If we see this flag, we first check that the LAST_PDU flag is set as well, and then we complete the request when the data transfer (and data digest verification, if it's on) is done.

While we're at it, reuse a bit of code with nvme_tcp_fail_request.

Reported-by: Steve Blightman <steve.blightman@oracle.com>
Suggested-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Oliver Smith-Denny <osmithde@cisco.com>
Tested-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
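For reference, here is a minimal userspace sketch of the flag handling described above. The bit positions are assumed to mirror enum nvme_tcp_pdu_flags in include/linux/nvme-tcp.h, but the macro names, values and helper below are illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative C2HData PDU flag bits (assumed to mirror enum nvme_tcp_pdu_flags). */
#define F_DATA_LAST    (1u << 2)
#define F_DATA_SUCCESS (1u << 3)

/*
 * SUCCESS is only meaningful on the last C2HData PDU of a transfer: a PDU
 * that sets SUCCESS without LAST_PDU is a protocol error; otherwise SUCCESS
 * lets the host complete the request itself once the data (and digest, if
 * enabled) has been consumed, with no response capsule to follow.
 */
static const char *c2h_data_disposition(uint8_t flags)
{
        if ((flags & F_DATA_SUCCESS) && !(flags & F_DATA_LAST))
                return "protocol error: trigger error recovery";
        if (flags & F_DATA_SUCCESS)
                return "complete the request after the data transfer";
        return "wait for the completion response capsule";
}

int main(void)
{
        printf("%s\n", c2h_data_disposition(F_DATA_SUCCESS | F_DATA_LAST));
        printf("%s\n", c2h_data_disposition(F_DATA_SUCCESS));
        printf("%s\n", c2h_data_disposition(F_DATA_LAST));
        return 0;
}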
1 parent 005c674 commit 602d674

File tree

1 file changed

+28 -4 lines changed

drivers/nvme/host/tcp.c

Lines changed: 28 additions & 4 deletions
@@ -463,6 +463,15 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 
         queue->data_remaining = le32_to_cpu(pdu->data_length);
 
+        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
+            unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
+                dev_err(queue->ctrl->ctrl.device,
+                        "queue %d tag %#x SUCCESS set but not last PDU\n",
+                        nvme_tcp_queue_id(queue), rq->tag);
+                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+                return -EPROTO;
+        }
+
         return 0;
 
 }
@@ -618,6 +627,14 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
         return ret;
 }
 
+static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+{
+        union nvme_result res = {};
+
+        nvme_end_request(rq, cpu_to_le16(status << 1), res);
+}
+
+
 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                 unsigned int *offset, size_t *len)
 {
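The new nvme_tcp_end_request() helper has to fabricate a completion status for nvme_end_request(), which at this point in the tree expects the status word in completion-queue-entry layout: bit 0 is the phase tag and the status code occupies bits 15:1, hence the left shift by one. A standalone sketch of that conversion, with status-code values shown purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative NVMe status codes: 0x0 Successful Completion, 0x4 Data Transfer Error. */
#define SC_SUCCESS          0x0
#define SC_DATA_XFER_ERROR  0x4

/* Shift a bare status code into CQE layout (phase tag in bit 0, code in bits 15:1). */
static uint16_t to_cqe_status(uint16_t sc)
{
        return (uint16_t)(sc << 1);
}

int main(void)
{
        printf("SC_SUCCESS          -> 0x%04x\n", to_cqe_status(SC_SUCCESS));          /* 0x0000 */
        printf("SC_DATA_XFER_ERROR  -> 0x%04x\n", to_cqe_status(SC_DATA_XFER_ERROR));  /* 0x0008 */
        return 0;
}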
@@ -685,6 +702,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
                         nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
                         queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
                 } else {
+                        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
+                                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
                         nvme_tcp_init_recv_ctx(queue);
                 }
         }
@@ -695,6 +714,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
                 struct sk_buff *skb, unsigned int *offset, size_t *len)
 {
+        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
         char *ddgst = (char *)&queue->recv_ddgst;
         size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
         off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
@@ -718,6 +738,13 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
                 return -EIO;
         }
 
+        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
+                struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
+                                pdu->command_id);
+
+                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+        }
+
         nvme_tcp_init_recv_ctx(queue);
         return 0;
 }
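Taken together with the recv_data hunk above, the completion point for a SUCCESS-flagged transfer depends on whether data digests are negotiated: without DDGST the request ends as soon as the last payload byte is consumed, with DDGST it ends only after the received digest verifies. A small sketch of that decision (names are illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Where a SUCCESS-flagged C2HData transfer is completed on the host side. */
static const char *completion_point(bool success_flag, bool ddgst_enabled)
{
        if (!success_flag)
                return "on the response capsule";
        return ddgst_enabled ? "in recv_ddgst, after digest verification"
                             : "in recv_data, after the last payload byte";
}

int main(void)
{
        printf("%s\n", completion_point(true, false));
        printf("%s\n", completion_point(true, true));
        printf("%s\n", completion_point(false, false));
        return 0;
}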
@@ -815,10 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-        union nvme_result res = {};
-
-        nvme_end_request(blk_mq_rq_from_pdu(req),
-                        cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
+        nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
