@@ -77,6 +77,8 @@ static const char transfertypes[][12] = {
  * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
  * elements. Segments are then coalesced when registered, if possible
  * within the selected memreg mode.
+ *
+ * Returns positive number of segments converted, or a negative errno.
  */
 
 static int
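
The new comment documents the revised contract: callers can now distinguish failure modes instead of testing for a bare zero. A minimal sketch of the calling pattern under that contract (illustrative, not part of the patch):

	nsegs = rpcrdma_convert_iovs(xdrbuf, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;	/* -ENOMEM or -EIO, propagated unchanged */
	/* success: nsegs entries of seg[] have been filled in */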
@@ -103,12 +105,13 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 			/* alloc the pagelist for receiving buffer */
 			ppages[p] = alloc_page(GFP_ATOMIC);
 			if (!ppages[p])
-				return 0;
+				return -ENOMEM;
 		}
 		seg[n].mr_page = ppages[p];
 		seg[n].mr_offset = (void *)(unsigned long) page_base;
 		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
-		BUG_ON(seg[n].mr_len > PAGE_SIZE);
+		if (seg[n].mr_len > PAGE_SIZE)
+			return -EIO;
 		len -= seg[n].mr_len;
 		++n;
 		++p;
@@ -117,7 +120,7 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 
 	/* Message overflows the seg array */
 	if (len && n == nsegs)
-		return 0;
+		return -EIO;
 
 	if (xdrbuf->tail[0].iov_len) {
 		/* the rpcrdma protocol allows us to omit any trailing
@@ -126,7 +129,7 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 			return n;
 		if (n == nsegs)
 			/* Tail remains, but we're out of segments */
-			return 0;
+			return -EIO;
 		seg[n].mr_page = NULL;
 		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
 		seg[n].mr_len = xdrbuf->tail[0].iov_len;
@@ -167,15 +170,17 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
  * Reply chunk (a counted array):
  *  N elements:
  *   1 - N - HLOO - HLOO - ... - HLOO
+ *
+ * Returns positive RPC/RDMA header size, or negative errno.
  */
 
-static unsigned int
+static ssize_t
 rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
 {
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
-	int nsegs, nchunks = 0;
+	int n, nsegs, nchunks = 0;
 	unsigned int pos;
 	struct rpcrdma_mr_seg *seg = req->rl_segments;
 	struct rpcrdma_read_chunk *cur_rchunk = NULL;
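
Changing the return type from unsigned int to ssize_t lets one value carry either an RPC/RDMA header length or a negative errno, the same signed-size idiom as read(2). Hoisting n out of the do-loop (next hunk) keeps the result of rpcrdma_register_external() in scope at the out: label, so the cleanup path can return it. A sketch of the caller-side pattern this enables, assuming hdrlen is declared ssize_t:

	hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, headerp, rtype);
	if (hdrlen < 0)
		return hdrlen;	/* errno from iov conversion or registration */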
@@ -201,11 +206,11 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 		pos = target->head[0].iov_len;
 
 	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
-	if (nsegs == 0)
-		return 0;
+	if (nsegs < 0)
+		return nsegs;
 
 	do {
-		int n = rpcrdma_register_external(seg, nsegs,
+		n = rpcrdma_register_external(seg, nsegs,
 				cur_wchunk != NULL, r_xprt);
 		if (n <= 0)
 			goto out;
@@ -277,7 +282,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	for (pos = 0; nchunks--;)
 		pos += rpcrdma_deregister_external(
 				&req->rl_segments[pos], r_xprt);
-	return 0;
+	return n;
 }
 
 /*
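
This return n is reached only via the goto out taken when rpcrdma_register_external() returns n <= 0 (see the do-loop hunk above), so after unwinding the chunks that were registered before the failure, the function now reports that error instead of a bare 0. In sketch form, the control flow the patch produces:

	n = rpcrdma_register_external(seg, nsegs, cur_wchunk != NULL, r_xprt);
	if (n <= 0)
		goto out;	/* jump to the unwind below, n preserved */

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt);
	return n;	/* n <= 0 on this path */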
@@ -359,6 +364,8 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
  * [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
  * [2] -- optional padding.
  * [3] -- if padded, header only in [1] and data here.
+ *
+ * Returns zero on success, otherwise a negative errno.
  */
 
 int
@@ -368,7 +375,8 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	char *base;
-	size_t hdrlen, rpclen, padlen;
+	size_t rpclen, padlen;
+	ssize_t hdrlen;
 	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
 
@@ -439,7 +447,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	/* The following simplification is not true forever */
 	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
 		wtype = rpcrdma_noch;
-	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);
+	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
+		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
+			__func__);
+		return -EIO;
+	}
 
 	hdrlen = 28; /*sizeof *headerp;*/
 	padlen = 0;
@@ -464,8 +476,11 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 		headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
 		hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
-		BUG_ON(wtype != rpcrdma_noch);
-
+		if (wtype != rpcrdma_noch) {
+			dprintk("RPC:       %s: invalid chunk list\n",
+				__func__);
+			return -EIO;
+		}
 	} else {
 		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
 		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
@@ -500,9 +515,8 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		hdrlen = rpcrdma_create_chunks(rqst,
 					&rqst->rq_rcv_buf, headerp, wtype);
 	}
-
-	if (hdrlen == 0)
-		return -1;
+	if (hdrlen < 0)
+		return hdrlen;
 
 	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",