Skip to content

Commit 5e62d5c

Browse files
Christoph Hellwig authored and axboe committed
nvmet: better data length validation
Currently the NVMe target stores the expexted data length in req->data_len and uses that for data transfer decisions, but that does not take the actual transfer length in the SGLs into account. So this adds a new transfer_len field, into which the transport drivers store the actual transfer length. We then check the two match before actually executing the command. The FC transport driver already had such a field, which is removed in favour of the common one. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 03e0f3a commit 5e62d5c

File tree

5 files changed

+34
-25
lines changed

5 files changed

+34
-25
lines changed

drivers/nvme/target/core.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -501,6 +501,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
501501
req->ops = ops;
502502
req->sg = NULL;
503503
req->sg_cnt = 0;
504+
req->transfer_len = 0;
504505
req->rsp->status = 0;
505506

506507
/* no support for fused commands yet */
@@ -550,6 +551,15 @@ void nvmet_req_uninit(struct nvmet_req *req)
550551
}
551552
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
552553

554+
void nvmet_req_execute(struct nvmet_req *req)
555+
{
556+
if (unlikely(req->data_len != req->transfer_len))
557+
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
558+
else
559+
req->execute(req);
560+
}
561+
EXPORT_SYMBOL_GPL(nvmet_req_execute);
562+
553563
static inline bool nvmet_cc_en(u32 cc)
554564
{
555565
return (cc >> NVME_CC_EN_SHIFT) & 0x1;

drivers/nvme/target/fc.c

Lines changed: 12 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ struct nvmet_fc_fcp_iod {
7676
dma_addr_t rspdma;
7777
struct scatterlist *data_sg;
7878
int data_sg_cnt;
79-
u32 total_length;
8079
u32 offset;
8180
enum nvmet_fcp_datadir io_dir;
8281
bool active;
@@ -1700,7 +1699,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
17001699
u32 page_len, length;
17011700
int i = 0;
17021701

1703-
length = fod->total_length;
1702+
length = fod->req.transfer_len;
17041703
nent = DIV_ROUND_UP(length, PAGE_SIZE);
17051704
sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
17061705
if (!sg)
@@ -1789,7 +1788,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
17891788
u32 rsn, rspcnt, xfr_length;
17901789

17911790
if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1792-
xfr_length = fod->total_length;
1791+
xfr_length = fod->req.transfer_len;
17931792
else
17941793
xfr_length = fod->offset;
17951794

@@ -1815,7 +1814,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
18151814
rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
18161815
if (!(rspcnt % fod->queue->ersp_ratio) ||
18171816
sqe->opcode == nvme_fabrics_command ||
1818-
xfr_length != fod->total_length ||
1817+
xfr_length != fod->req.transfer_len ||
18191818
(le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
18201819
(sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
18211820
queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
@@ -1892,7 +1891,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
18921891
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
18931892

18941893
tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1895-
(fod->total_length - fod->offset));
1894+
(fod->req.transfer_len - fod->offset));
18961895
fcpreq->transfer_length = tlen;
18971896
fcpreq->transferred_length = 0;
18981897
fcpreq->fcp_error = 0;
@@ -1906,7 +1905,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
19061905
* combined xfr with response.
19071906
*/
19081907
if ((op == NVMET_FCOP_READDATA) &&
1909-
((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1908+
((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
19101909
(tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
19111910
fcpreq->op = NVMET_FCOP_READDATA_RSP;
19121911
nvmet_fc_prep_fcp_rsp(tgtport, fod);
@@ -1986,7 +1985,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
19861985
}
19871986

19881987
fod->offset += fcpreq->transferred_length;
1989-
if (fod->offset != fod->total_length) {
1988+
if (fod->offset != fod->req.transfer_len) {
19901989
spin_lock_irqsave(&fod->flock, flags);
19911990
fod->writedataactive = true;
19921991
spin_unlock_irqrestore(&fod->flock, flags);
@@ -1998,9 +1997,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
19981997
}
19991998

20001999
/* data transfer complete, resume with nvmet layer */
2001-
2002-
fod->req.execute(&fod->req);
2003-
2000+
nvmet_req_execute(&fod->req);
20042001
break;
20052002

20062003
case NVMET_FCOP_READDATA:
@@ -2023,7 +2020,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
20232020
}
20242021

20252022
fod->offset += fcpreq->transferred_length;
2026-
if (fod->offset != fod->total_length) {
2023+
if (fod->offset != fod->req.transfer_len) {
20272024
/* transfer the next chunk */
20282025
nvmet_fc_transfer_fcp_data(tgtport, fod,
20292026
NVMET_FCOP_READDATA);
@@ -2160,7 +2157,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
21602157

21612158
fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
21622159

2163-
fod->total_length = be32_to_cpu(cmdiu->data_len);
2160+
fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
21642161
if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
21652162
fod->io_dir = NVMET_FCP_WRITE;
21662163
if (!nvme_is_write(&cmdiu->sqe))
@@ -2171,17 +2168,14 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
21712168
goto transport_error;
21722169
} else {
21732170
fod->io_dir = NVMET_FCP_NODATA;
2174-
if (fod->total_length)
2171+
if (fod->req.transfer_len)
21752172
goto transport_error;
21762173
}
21772174

21782175
fod->req.cmd = &fod->cmdiubuf.sqe;
21792176
fod->req.rsp = &fod->rspiubuf.cqe;
21802177
fod->req.port = fod->queue->port;
21812178

2182-
/* ensure nvmet handlers will set cmd handler callback */
2183-
fod->req.execute = NULL;
2184-
21852179
/* clear any response payload */
21862180
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
21872181

@@ -2201,7 +2195,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
22012195
/* keep a running counter of tail position */
22022196
atomic_inc(&fod->queue->sqtail);
22032197

2204-
if (fod->total_length) {
2198+
if (fod->req.transfer_len) {
22052199
ret = nvmet_fc_alloc_tgt_pgs(fod);
22062200
if (ret) {
22072201
nvmet_req_complete(&fod->req, ret);
@@ -2224,9 +2218,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
22242218
* can invoke the nvmet_layer now. If read data, cmd completion will
22252219
* push the data
22262220
*/
2227-
2228-
fod->req.execute(&fod->req);
2229-
2221+
nvmet_req_execute(&fod->req);
22302222
return;
22312223

22322224
transport_error:

drivers/nvme/target/loop.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
127127
struct nvme_loop_iod *iod =
128128
container_of(work, struct nvme_loop_iod, work);
129129

130-
iod->req.execute(&iod->req);
130+
nvmet_req_execute(&iod->req);
131131
}
132132

133133
static enum blk_eh_timer_return
@@ -176,6 +176,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
176176

177177
iod->req.sg = iod->sg_table.sgl;
178178
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
179+
iod->req.transfer_len = blk_rq_bytes(req);
179180
}
180181

181182
blk_mq_start_request(req);

drivers/nvme/target/nvmet.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,10 @@ struct nvmet_req {
223223
struct bio inline_bio;
224224
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
225225
int sg_cnt;
226+
/* data length as parsed from the command: */
226227
size_t data_len;
228+
/* data length as parsed from the SGL descriptor: */
229+
size_t transfer_len;
227230

228231
struct nvmet_port *port;
229232

@@ -266,6 +269,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
266269
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
267270
struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
268271
void nvmet_req_uninit(struct nvmet_req *req);
272+
void nvmet_req_execute(struct nvmet_req *req);
269273
void nvmet_req_complete(struct nvmet_req *req, u16 status);
270274

271275
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,

drivers/nvme/target/rdma.c

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -148,14 +148,14 @@ static inline u32 get_unaligned_le24(const u8 *p)
148148
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
149149
{
150150
return nvme_is_write(rsp->req.cmd) &&
151-
rsp->req.data_len &&
151+
rsp->req.transfer_len &&
152152
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
153153
}
154154

155155
static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
156156
{
157157
return !nvme_is_write(rsp->req.cmd) &&
158-
rsp->req.data_len &&
158+
rsp->req.transfer_len &&
159159
!rsp->req.rsp->status &&
160160
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
161161
}
@@ -577,7 +577,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
577577
return;
578578
}
579579

580-
rsp->req.execute(&rsp->req);
580+
nvmet_req_execute(&rsp->req);
581581
}
582582

583583
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -609,6 +609,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
609609

610610
nvmet_rdma_use_inline_sg(rsp, len, off);
611611
rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
612+
rsp->req.transfer_len += len;
612613
return 0;
613614
}
614615

@@ -636,6 +637,7 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
636637
nvmet_data_dir(&rsp->req));
637638
if (ret < 0)
638639
return NVME_SC_INTERNAL;
640+
rsp->req.transfer_len += len;
639641
rsp->n_rdma += ret;
640642

641643
if (invalidate) {
@@ -693,7 +695,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
693695
queue->cm_id->port_num, &rsp->read_cqe, NULL))
694696
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
695697
} else {
696-
rsp->req.execute(&rsp->req);
698+
nvmet_req_execute(&rsp->req);
697699
}
698700

699701
return true;

0 commit comments

Comments
 (0)