
Commit a2d3796

Ming Lei authored and axboe committed
block: blk-merge: try to make front segments in full size
When merging a bvec into a segment, if the bvec is too big to merge, the current policy is to move the whole bvec into a new segment.

This patch changes the policy to try to maximize the size of front segments: in the situation above, part of the bvec is merged into the current segment, and the remainder is put into the next segment.

This prepares for multipage bvec support, since this case can then become quite common and we should try to make front segments full size.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 6a501bf commit a2d3796
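
To make the policy concrete, here is a minimal, self-contained C sketch (not kernel code; toy_bvec and front_fill_bytes are hypothetical names) of the new behaviour: merge only the part of the bvec that still fits into the current segment, and leave the remainder for the next one. It assumes seg_size does not already exceed the queue's max segment size.

#include <stdio.h>

/* Toy stand-in for a bio_vec: just a length and an offset. */
struct toy_bvec {
	unsigned int len;	/* bytes left in this bvec */
	unsigned int offset;	/* offset of those bytes */
};

/*
 * Old policy: if seg_size + bv->len exceeds max_seg_size, the whole bvec
 * starts a new segment.  New policy: merge the part that still fits (the
 * "advance") into the current segment and keep only the remainder for the
 * next segment.  Assumes seg_size <= max_seg_size on entry.
 */
static unsigned int front_fill_bytes(unsigned int seg_size,
				     unsigned int max_seg_size,
				     struct toy_bvec *bv)
{
	unsigned int advance;

	if (seg_size + bv->len <= max_seg_size)
		return bv->len;			/* whole bvec fits, no split */

	advance = max_seg_size - seg_size;	/* bytes that still fit */
	bv->len -= advance;			/* remainder for the next segment */
	bv->offset += advance;
	return advance;
}

int main(void)
{
	struct toy_bvec bv = { .len = 8192, .offset = 0 };
	unsigned int merged = front_fill_bytes(60 * 1024, 64 * 1024, &bv);

	/*
	 * Prints "merged 4096, remainder 4096": the front segment is filled
	 * to 64 KB and only 4 KB has to go into a new segment, instead of
	 * the whole 8 KB as under the old policy.
	 */
	printf("merged %u, remainder %u\n", merged, bv.len);
	return 0;
}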


block/blk-merge.c

Lines changed: 49 additions & 5 deletions
@@ -109,6 +109,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	unsigned advance = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
@@ -134,12 +135,32 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		}
 
 		if (bvprvp && blk_queue_cluster(q)) {
-			if (seg_size + bv.bv_len > queue_max_segment_size(q))
-				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
+			if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
+				/*
+				 * One assumption is that initial value of
+				 * @seg_size(equals to bv.bv_len) won't be
+				 * bigger than max segment size, but this
+				 * becomes false after multipage bvecs.
+				 */
+				advance = queue_max_segment_size(q) - seg_size;
+
+				if (advance > 0) {
+					seg_size += advance;
+					sectors += advance >> 9;
+					bv.bv_len -= advance;
+					bv.bv_offset += advance;
+				}
+
+				/*
+				 * Still need to put remainder of current
+				 * bvec into a new segment.
+				 */
+				goto new_segment;
+			}
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
@@ -161,6 +182,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
 
+		/* restore the bvec for iterator */
+		if (advance) {
+			bv.bv_len += advance;
+			bv.bv_offset -= advance;
+			advance = 0;
+		}
 	}
 
 	do_split = false;
@@ -361,16 +388,29 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 {
 
 	int nbytes = bvec->bv_len;
+	unsigned advance = 0;
 
 	if (*sg && *cluster) {
-		if ((*sg)->length + nbytes > queue_max_segment_size(q))
-			goto new_segment;
-
 		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
 		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
+		/*
+		 * try best to merge part of the bvec into previous
+		 * segment and follow same policy with
+		 * blk_bio_segment_split()
+		 */
+		if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
+			advance = queue_max_segment_size(q) - (*sg)->length;
+			if (advance) {
+				(*sg)->length += advance;
+				bvec->bv_offset += advance;
+				bvec->bv_len -= advance;
+			}
+			goto new_segment;
+		}
+
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
@@ -393,6 +433,10 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
+
+		/* for making iterator happy */
+		bvec->bv_offset -= advance;
+		bvec->bv_len += advance;
 	}
 	*bvprv = *bvec;
 }
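
For the scatter-gather side (the __blk_segment_map_sg() hunks above), the aggregate effect is that each front segment is filled up to the max segment size before the next one is opened. The standalone sketch below (hypothetical names, plain arrays instead of a scatterlist) illustrates that effect for a run of bvec lengths; it deliberately ignores the BIOVEC_PHYS_MERGEABLE and BIOVEC_SEG_BOUNDARY checks that the real code also applies.

#include <stdio.h>

#define MAX_SEG_SIZE	(64 * 1024)	/* assumed queue_max_segment_size() */

int main(void)
{
	/* three bvecs: 60 KB, 8 KB, and a 70 KB multipage-style bvec */
	unsigned int bvec_len[] = { 60 * 1024, 8 * 1024, 70 * 1024 };
	unsigned int seg_len[8] = { 0 };
	unsigned int nsegs = 0;

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int left = bvec_len[i];

		while (left) {
			unsigned int room = MAX_SEG_SIZE - seg_len[nsegs];

			if (!room) {		/* current segment is full */
				nsegs++;
				room = MAX_SEG_SIZE;
			}

			/* merge as much of the bvec as fits (the "advance") */
			unsigned int take = left < room ? left : room;

			seg_len[nsegs] += take;
			left -= take;
		}
	}

	/* prints 65536, 65536, 10240: full-size front segments, short tail */
	for (unsigned int i = 0; i <= nsegs; i++)
		printf("segment %u: %u bytes\n", i, seg_len[i]);
	return 0;
}

With bvec lengths of 60 KB, 8 KB and 70 KB and a 64 KB cap, this yields two full 64 KB front segments and a 10 KB tail, which is the "front segments in full size" behaviour the commit aims for.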
