@@ -20,7 +20,6 @@ struct dmz_bioctx {
 	struct dm_zone		*zone;
 	struct bio		*bio;
 	refcount_t		ref;
-	blk_status_t		status;
 };
 
 /*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
-	if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
-		bioctx->status = status;
-	bio_endio(bio);
+	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+		bio->bi_status = status;
+
+	if (refcount_dec_and_test(&bioctx->ref)) {
+		struct dm_zone *zone = bioctx->zone;
+
+		if (zone) {
+			if (bio->bi_status != BLK_STS_OK &&
+			    bio_op(bio) == REQ_OP_WRITE &&
+			    dmz_is_seq(zone))
+				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+			dmz_deactivate_zone(zone);
+		}
+		bio_endio(bio);
+	}
 }
 
 /*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
  * target BIO when there are no more references to its context.
  */
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
 {
-	struct dmz_bioctx *bioctx = bio->bi_private;
-	blk_status_t status = bio->bi_status;
+	struct dmz_bioctx *bioctx = clone->bi_private;
+	blk_status_t status = clone->bi_status;
 
-	bio_put(bio);
+	bio_put(clone);
 	dmz_bio_endio(bioctx->bio, status);
 }
 
 /*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
  * original target BIO.
  */
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
-			       struct bio *bio, sector_t chunk_block,
-			       unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+			  struct bio *bio, sector_t chunk_block,
+			  unsigned int nr_blocks)
 {
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-	sector_t sector;
 	struct bio *clone;
 
-	/* BIO remap sector */
-	sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
-	/* If the read is not partial, there is no need to clone the BIO */
-	if (nr_blocks == dmz_bio_blocks(bio)) {
-		/* Setup and submit the BIO */
-		bio->bi_iter.bi_sector = sector;
-		refcount_inc(&bioctx->ref);
-		generic_make_request(bio);
-		return 0;
-	}
-
-	/* Partial BIO: we need to clone the BIO */
 	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
 	if (!clone)
 		return -ENOMEM;
 
-	/* Setup the clone */
-	clone->bi_iter.bi_sector = sector;
+	bio_set_dev(clone, dmz->dev->bdev);
+	clone->bi_iter.bi_sector =
+		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
 	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
-	clone->bi_end_io = dmz_read_bio_end_io;
+	clone->bi_end_io = dmz_clone_endio;
 	clone->bi_private = bioctx;
 
 	bio_advance(bio, clone->bi_iter.bi_size);
 
-	/* Submit the clone */
 	refcount_inc(&bioctx->ref);
 	generic_make_request(clone);
 
+	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+		zone->wp_block += nr_blocks;
+
 	return 0;
 }
 
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 	if (nr_blocks) {
 		/* Valid blocks found: read them */
 		nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-		ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+		ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
 		if (ret)
 			return ret;
 		chunk_block += nr_blocks;
@@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 	return 0;
 }
 
-/*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
-				 struct bio *bio, sector_t chunk_block,
-				 unsigned int nr_blocks)
-{
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-	/* Setup and submit the BIO */
-	bio_set_dev(bio, dmz->dev->bdev);
-	bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-	refcount_inc(&bioctx->ref);
-	generic_make_request(bio);
-
-	if (dmz_is_seq(zone))
-		zone->wp_block += nr_blocks;
-}
-
 /*
  * Write blocks directly in a data zone, at the write pointer.
  * If a buffer zone is assigned, invalidate the blocks written
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
 		return -EROFS;
 
 	/* Submit write */
-	dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+	if (ret)
+		return ret;
 
 	/*
 	 * Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
 		return -EROFS;
 
 	/* Submit write */
-	dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+	if (ret)
+		return ret;
 
 	/*
 	 * Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	bioctx->zone = NULL;
 	bioctx->bio = bio;
 	refcount_set(&bioctx->ref, 1);
-	bioctx->status = BLK_STS_OK;
 
 	/* Set the BIO pending in the flush list */
 	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_SUBMITTED;
 }
 
-/*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-	if (bioctx->status == BLK_STS_OK && *error)
-		bioctx->status = *error;
-
-	if (!refcount_dec_and_test(&bioctx->ref))
-		return DM_ENDIO_INCOMPLETE;
-
-	/* Done */
-	bio->bi_status = bioctx->status;
-
-	if (bioctx->zone) {
-		struct dm_zone *zone = bioctx->zone;
-
-		if (*error && bio_op(bio) == REQ_OP_WRITE) {
-			if (dmz_is_seq(zone))
-				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
-		}
-		dmz_deactivate_zone(zone);
-	}
-
-	return DM_ENDIO_DONE;
-}
-
 /*
  * Get zoned device information.
  */
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
 	.ctr		= dmz_ctr,
 	.dtr		= dmz_dtr,
 	.map		= dmz_map,
-	.end_io		= dmz_end_io,
 	.io_hints	= dmz_io_hints,
 	.prepare_ioctl	= dmz_prepare_ioctl,
 	.postsuspend	= dmz_suspend,
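Below is a minimal userspace sketch, not part of the patch, illustrating the shared-refcount completion pattern the patch consolidates into dmz_bio_endio(). All names here (io_ctx, io_get, io_put) are hypothetical, and C11 atomics stand in for the kernel's refcount_t: each submitted clone takes a reference on the parent context, the first recorded error sticks, and whichever completion drops the last reference ends the parent exactly once.

/* Hypothetical userspace model of the patch's completion scheme. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct io_ctx {
	atomic_int ref;		/* one ref per in-flight clone, plus the submitter's own */
	atomic_int status;	/* 0 == OK; the first non-zero error sticks */
};

static void io_get(struct io_ctx *ctx)
{
	atomic_fetch_add(&ctx->ref, 1);	/* taken before each clone is submitted */
}

/* Called once per completed clone; returns true on the final completion. */
static bool io_put(struct io_ctx *ctx, int status)
{
	int ok = 0;

	/* Record only the first error, as done with bio->bi_status. */
	if (status)
		atomic_compare_exchange_strong(&ctx->status, &ok, status);

	/* fetch_sub returns the old value: 1 means we dropped the last ref. */
	if (atomic_fetch_sub(&ctx->ref, 1) == 1) {
		printf("parent completed, status=%d\n", atomic_load(&ctx->status));
		return true;
	}
	return false;
}

int main(void)
{
	struct io_ctx ctx = { .ref = 1, .status = 0 };	/* submitter's initial ref */

	io_get(&ctx);		/* clone 1 submitted */
	io_get(&ctx);		/* clone 2 submitted */
	io_put(&ctx, 0);	/* clone 1 completes OK */
	io_put(&ctx, -5);	/* clone 2 fails; its error sticks */
	io_put(&ctx, 0);	/* submitter drops its own ref: final completion */
	return 0;
}

In this sketch, atomic_fetch_sub() returning 1 plays the role of refcount_dec_and_test(): exactly one caller observes the drop to zero and therefore owns the final bio_endio(), which is why the patch can retire the .end_io hook entirely.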