@@ -231,74 +231,36 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 	__pblk_end_io_read(pblk, rqd, true);
 }
 
-static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
-			     struct bio *orig_bio, unsigned int bio_init_idx,
-			     unsigned long *read_bitmap)
+static void pblk_end_partial_read(struct nvm_rq *rqd)
 {
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
-	struct bio *new_bio;
+	struct pblk *pblk = rqd->private;
+	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
+	struct bio *new_bio = rqd->bio;
+	struct bio *bio = pr_ctx->orig_bio;
 	struct bio_vec src_bv, dst_bv;
-	void *ppa_ptr = NULL;
-	void *src_p, *dst_p;
-	dma_addr_t dma_ppa_list = 0;
-	__le64 *lba_list_mem, *lba_list_media;
-	int nr_secs = rqd->nr_ppas;
+	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	int bio_init_idx = pr_ctx->bio_init_idx;
+	unsigned long *read_bitmap = pr_ctx->bitmap;
+	int nr_secs = pr_ctx->orig_nr_secs;
 	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-	int i, ret, hole;
-
-	/* Re-use allocated memory for intermediate lbas */
-	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
-	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
-	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
-		goto fail_add_pages;
-
-	if (nr_holes != new_bio->bi_vcnt) {
-		pblk_err(pblk, "malformed bio\n");
-		goto fail;
-	}
-
-	for (i = 0; i < nr_secs; i++)
-		lba_list_mem[i] = meta_list[i].lba;
-
-	new_bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
-	rqd->bio = new_bio;
-	rqd->nr_ppas = nr_holes;
-	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
-	if (unlikely(nr_holes == 1)) {
-		ppa_ptr = rqd->ppa_list;
-		dma_ppa_list = rqd->dma_ppa_list;
-		rqd->ppa_addr = rqd->ppa_list[0];
-	}
-
-	ret = pblk_submit_io_sync(pblk, rqd);
-	if (ret) {
-		bio_put(rqd->bio);
-		pblk_err(pblk, "sync read IO submission failed\n");
-		goto fail;
-	}
-
-	if (rqd->error) {
-		atomic_long_inc(&pblk->read_failed);
-#ifdef CONFIG_NVM_PBLK_DEBUG
-		pblk_print_failed_rqd(pblk, rqd, rqd->error);
-#endif
-	}
+	__le64 *lba_list_mem, *lba_list_media;
+	void *src_p, *dst_p;
+	int hole, i;
 
 	if (unlikely(nr_holes == 1)) {
 		struct ppa_addr ppa;
 
 		ppa = rqd->ppa_addr;
-		rqd->ppa_list = ppa_ptr;
-		rqd->dma_ppa_list = dma_ppa_list;
+		rqd->ppa_list = pr_ctx->ppa_ptr;
+		rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
 		rqd->ppa_list[0] = ppa;
 	}
 
+	/* Re-use allocated memory for intermediate lbas */
+	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
+
 	for (i = 0; i < nr_secs; i++) {
 		lba_list_media[i] = meta_list[i].lba;
 		meta_list[i].lba = lba_list_mem[i];
@@ -316,7 +278,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 		meta_list[hole].lba = lba_list_media[i];
 
 		src_bv = new_bio->bi_io_vec[i++];
-		dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];
+		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
 
 		src_p = kmap_atomic(src_bv.bv_page);
 		dst_p = kmap_atomic(dst_bv.bv_page);
@@ -334,19 +296,107 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 	} while (hole < nr_secs);
 
 	bio_put(new_bio);
+	kfree(pr_ctx);
 
 	/* restore original request */
 	rqd->bio = NULL;
 	rqd->nr_ppas = nr_secs;
 
+	bio_endio(bio);
 	__pblk_end_io_read(pblk, rqd, false);
-	return NVM_IO_DONE;
+}
 
-fail:
-	/* Free allocated pages in new bio */
+static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
+			    unsigned int bio_init_idx,
+			    unsigned long *read_bitmap,
+			    int nr_holes)
+{
+	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+	struct pblk_pr_ctx *pr_ctx;
+	struct bio *new_bio, *bio = r_ctx->private;
+	__le64 *lba_list_mem;
+	int nr_secs = rqd->nr_ppas;
+	int i;
+
+	/* Re-use allocated memory for intermediate lbas */
+	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+
+	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
+
+	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
+		goto fail_bio_put;
+
+	if (nr_holes != new_bio->bi_vcnt) {
+		WARN_ONCE(1, "pblk: malformed bio\n");
+		goto fail_free_pages;
+	}
+
+	pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+	if (!pr_ctx)
+		goto fail_free_pages;
+
+	for (i = 0; i < nr_secs; i++)
+		lba_list_mem[i] = meta_list[i].lba;
+
+	new_bio->bi_iter.bi_sector = 0; /* internal bio */
+	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
+
+	rqd->bio = new_bio;
+	rqd->nr_ppas = nr_holes;
+	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
+	pr_ctx->ppa_ptr = NULL;
+	pr_ctx->orig_bio = bio;
+	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
+	pr_ctx->bio_init_idx = bio_init_idx;
+	pr_ctx->orig_nr_secs = nr_secs;
+	r_ctx->private = pr_ctx;
+
+	if (unlikely(nr_holes == 1)) {
+		pr_ctx->ppa_ptr = rqd->ppa_list;
+		pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
+		rqd->ppa_addr = rqd->ppa_list[0];
+	}
+	return 0;
+
+fail_free_pages:
 	pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_add_pages:
+fail_bio_put:
+	bio_put(new_bio);
+
+	return -ENOMEM;
+}
+
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+				 unsigned int bio_init_idx,
+				 unsigned long *read_bitmap, int nr_secs)
+{
+	int nr_holes;
+	int ret;
+
+	nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
+
+	if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
+				    nr_holes))
+		return NVM_IO_ERR;
+
+	rqd->end_io = pblk_end_partial_read;
+
+	ret = pblk_submit_io(pblk, rqd);
+	if (ret) {
+		bio_put(rqd->bio);
+		pblk_err(pblk, "partial read IO submission failed\n");
+		goto err;
+	}
+
+	return NVM_IO_OK;
+
+err:
 	pblk_err(pblk, "failed to perform partial read\n");
+
+	/* Free allocated pages in new bio */
+	pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
 	__pblk_end_io_read(pblk, rqd, false);
 	return NVM_IO_ERR;
 }
@@ -480,8 +530,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	/* The read bio request could be partially filled by the write buffer,
 	 * but there are some holes that need to be read from the drive.
 	 */
-	return pblk_partial_read(pblk, rqd, bio, bio_init_idx, read_bitmap);
+	ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
+				    nr_secs);
+	if (ret)
+		goto fail_meta_free;
+
+	return NVM_IO_OK;
 
+fail_meta_free:
+	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 fail_rqd_free:
 	pblk_free_rqd(pblk, rqd, PBLK_READ);
 	return ret;
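
The asynchronous flow relies on a small per-request context to carry the original read's state across the device I/O: pblk_setup_partial_read() fills it and hangs it off r_ctx->private, and pblk_end_partial_read() consumes it and kfree()s it. The struct itself lives in pblk.h and is not part of these hunks; the sketch below is only an inference from how pr_ctx is used above, not the definition from the patch.

	/* Sketch only: fields inferred from the pr_ctx accesses in this diff. */
	struct pblk_pr_ctx {
		struct bio *orig_bio;			/* user bio completed in pblk_end_partial_read() */
		DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);	/* copy of read_bitmap: sectors already served from cache */
		unsigned int orig_nr_secs;		/* sector count of the original request */
		unsigned int bio_init_idx;		/* first bvec of the original bio covered by this rqd */
		void *ppa_ptr;				/* saved ppa_list for the single-hole case */
		dma_addr_t dma_ppa_list;		/* saved ppa_list DMA handle for the single-hole case */
	};

Saving ppa_ptr/dma_ppa_list matters because the single-hole path overwrites rqd->ppa_addr (a union with rqd->ppa_list in struct nvm_rq), and the completion handler must restore the list pointer before reusing its trailing DMA buffer for the intermediate lba arrays. With that state stashed, pblk_partial_read_bio() can submit with pblk_submit_io() instead of pblk_submit_io_sync(), and the original bio is completed with bio_endio() from the end_io callback.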