@@ -76,7 +76,6 @@ struct nvmet_fc_fcp_iod {
 	dma_addr_t			rspdma;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
-	u32				total_length;
 	u32				offset;
 	enum nvmet_fcp_datadir		io_dir;
 	bool				active;
@@ -1700,7 +1699,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 	u32 page_len, length;
 	int i = 0;
 
-	length = fod->total_length;
+	length = fod->req.transfer_len;
 	nent = DIV_ROUND_UP(length, PAGE_SIZE);
 	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
 	if (!sg)
@@ -1789,7 +1788,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
 	u32 rsn, rspcnt, xfr_length;
 
 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
-		xfr_length = fod->total_length;
+		xfr_length = fod->req.transfer_len;
 	else
 		xfr_length = fod->offset;
 
@@ -1815,7 +1814,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
 	if (!(rspcnt % fod->queue->ersp_ratio) ||
 	    sqe->opcode == nvme_fabrics_command ||
-	    xfr_length != fod->total_length ||
+	    xfr_length != fod->req.transfer_len ||
 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
@@ -1892,7 +1891,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
 	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->total_length - fod->offset));
+			(fod->req.transfer_len - fod->offset));
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
@@ -1906,7 +1905,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 	 * combined xfr with response.
 	 */
 	if ((op == NVMET_FCOP_READDATA) &&
-	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
@@ -1986,7 +1985,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		fod->offset += fcpreq->transferred_length;
-		if (fod->offset != fod->total_length) {
+		if (fod->offset != fod->req.transfer_len) {
 			spin_lock_irqsave(&fod->flock, flags);
 			fod->writedataactive = true;
 			spin_unlock_irqrestore(&fod->flock, flags);
@@ -1998,9 +1997,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-
-		fod->req.execute(&fod->req);
-
+		nvmet_req_execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2023,7 +2020,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		fod->offset += fcpreq->transferred_length;
-		if (fod->offset != fod->total_length) {
+		if (fod->offset != fod->req.transfer_len) {
 			/* transfer the next chunk */
 			nvmet_fc_transfer_fcp_data(tgtport, fod,
 						NVMET_FCOP_READDATA);
@@ -2160,7 +2157,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 
 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
 
-	fod->total_length = be32_to_cpu(cmdiu->data_len);
+	fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
 		fod->io_dir = NVMET_FCP_WRITE;
 		if (!nvme_is_write(&cmdiu->sqe))
@@ -2171,17 +2168,14 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 			goto transport_error;
 	} else {
 		fod->io_dir = NVMET_FCP_NODATA;
-		if (fod->total_length)
+		if (fod->req.transfer_len)
 			goto transport_error;
 	}
 
 	fod->req.cmd = &fod->cmdiubuf.sqe;
 	fod->req.rsp = &fod->rspiubuf.cqe;
 	fod->req.port = fod->queue->port;
 
-	/* ensure nvmet handlers will set cmd handler callback */
-	fod->req.execute = NULL;
-
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 
@@ -2201,7 +2195,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	/* keep a running counter of tail position */
 	atomic_inc(&fod->queue->sqtail);
 
-	if (fod->total_length) {
+	if (fod->req.transfer_len) {
 		ret = nvmet_fc_alloc_tgt_pgs(fod);
 		if (ret) {
 			nvmet_req_complete(&fod->req, ret);
@@ -2224,9 +2218,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-
-	fod->req.execute(&fod->req);
-
+	nvmet_req_execute(&fod->req);
 	return;
 
 transport_error: