Skip to content

Commit b4b6cb6

Browse files
Ming Lei authored and axboe (Jens Axboe) committed
Revert "block: blk-merge: try to make front segments in full size"
This reverts commit a2d3796. If max segment size isn't 512-aligned, this patch won't work well. Also once multipage bvec is enabled, adjacent bvecs won't be physically contiguous if page is added via bio_add_page(), so we don't need this kind of complicated logic. Reported-by: Dmitry Osipenko <digetx@gmail.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 5448aca commit b4b6cb6

File tree

1 file changed

+5
-49
lines changed

1 file changed

+5
-49
lines changed

block/blk-merge.c

Lines changed: 5 additions & 49 deletions
Original file line number · Diff line number · Diff line change
@@ -109,7 +109,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
109109
bool do_split = true;
110110
struct bio *new = NULL;
111111
const unsigned max_sectors = get_max_io_size(q, bio);
112-
unsigned advance = 0;
113112

114113
bio_for_each_segment(bv, bio, iter) {
115114
/*
@@ -133,32 +132,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
133132
}
134133

135134
if (bvprvp && blk_queue_cluster(q)) {
135+
if (seg_size + bv.bv_len > queue_max_segment_size(q))
136+
goto new_segment;
136137
if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
137138
goto new_segment;
138139
if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
139140
goto new_segment;
140-
if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
141-
/*
142-
* One assumption is that initial value of
143-
* @seg_size(equals to bv.bv_len) won't be
144-
* bigger than max segment size, but this
145-
* becomes false after multipage bvecs.
146-
*/
147-
advance = queue_max_segment_size(q) - seg_size;
148-
149-
if (advance > 0) {
150-
seg_size += advance;
151-
sectors += advance >> 9;
152-
bv.bv_len -= advance;
153-
bv.bv_offset += advance;
154-
}
155-
156-
/*
157-
* Still need to put remainder of current
158-
* bvec into a new segment.
159-
*/
160-
goto new_segment;
161-
}
162141

163142
seg_size += bv.bv_len;
164143
bvprv = bv;
@@ -180,12 +159,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
180159
seg_size = bv.bv_len;
181160
sectors += bv.bv_len >> 9;
182161

183-
/* restore the bvec for iterator */
184-
if (advance) {
185-
bv.bv_len += advance;
186-
bv.bv_offset -= advance;
187-
advance = 0;
188-
}
189162
}
190163

191164
do_split = false;
@@ -386,29 +359,16 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
386359
{
387360

388361
int nbytes = bvec->bv_len;
389-
unsigned advance = 0;
390362

391363
if (*sg && *cluster) {
364+
if ((*sg)->length + nbytes > queue_max_segment_size(q))
365+
goto new_segment;
366+
392367
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
393368
goto new_segment;
394369
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
395370
goto new_segment;
396371

397-
/*
398-
* try best to merge part of the bvec into previous
399-
* segment and follow same policy with
400-
* blk_bio_segment_split()
401-
*/
402-
if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
403-
advance = queue_max_segment_size(q) - (*sg)->length;
404-
if (advance) {
405-
(*sg)->length += advance;
406-
bvec->bv_offset += advance;
407-
bvec->bv_len -= advance;
408-
}
409-
goto new_segment;
410-
}
411-
412372
(*sg)->length += nbytes;
413373
} else {
414374
new_segment:
@@ -431,10 +391,6 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
431391

432392
sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
433393
(*nsegs)++;
434-
435-
/* for making iterator happy */
436-
bvec->bv_offset -= advance;
437-
bvec->bv_len += advance;
438394
}
439395
*bvprv = *bvec;
440396
}

0 commit comments

Comments (0)