Commit ae03bf6

martinkpetersen authored and Jens Axboe committed
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
1 parent e1defc4 commit ae03bf6

File tree: 25 files changed, 147 additions and 97 deletions
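For context, the queue_* accessors used throughout this diff are thin wrappers over the request_queue fields that callers previously read directly. A minimal sketch of the read side, assuming the field layout at this point in the tree — the authoritative definitions live in include/linux/blkdev.h; the bodies and exact types below are illustrative assumptions, not copied from the commit:

	/* Illustrative sketch only — see include/linux/blkdev.h for the real thing. */
	static inline unsigned int queue_max_sectors(struct request_queue *q)
	{
		return q->max_sectors;		/* soft per-request cap, in 512-byte sectors */
	}

	static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
	{
		return q->max_hw_sectors;	/* hardware per-request cap, in sectors */
	}

	static inline unsigned short queue_max_phys_segments(struct request_queue *q)
	{
		return q->max_phys_segments;	/* max scatter-gather segments after merging */
	}

The point of the indirection is that external code no longer depends on where the limits are stored, presumably so the fields can later be reorganized without touching every caller.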

block/blk-barrier.c

Lines changed: 4 additions & 4 deletions
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 
 	bio->bi_sector = sector;
 
-	if (nr_sects > q->max_hw_sectors) {
-		bio->bi_size = q->max_hw_sectors << 9;
-		nr_sects -= q->max_hw_sectors;
-		sector += q->max_hw_sectors;
+	if (nr_sects > queue_max_hw_sectors(q)) {
+		bio->bi_size = queue_max_hw_sectors(q) << 9;
+		nr_sects -= queue_max_hw_sectors(q);
+		sector += queue_max_hw_sectors(q);
 	} else {
 		bio->bi_size = nr_sects << 9;
 		nr_sects = 0;

block/blk-core.c

Lines changed: 8 additions & 8 deletions
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(nr_sectors > q->max_hw_sectors)) {
+	if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			bdevname(bio->bi_bdev, b),
-			bio_sectors(bio),
-			q->max_hw_sectors);
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
 		goto end_io;
 	}
 
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > q->max_phys_segments ||
-	    rq->nr_phys_segments > q->max_hw_segments) {
+	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
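blk_rq_check_limits() above is the validation point for requests inserted into a queue directly rather than built up bio by bio. A hedged sketch of a caller, e.g. a stacking driver dispatching a cloned request — the helper below is hypothetical, illustrative only:

	/*
	 * Hypothetical helper: reject a cloned request that exceeds the
	 * lower queue's published limits before dispatching it.
	 */
	static int example_dispatch_clone(struct request_queue *q, struct request *rq)
	{
		if (blk_rq_check_limits(q, rq))
			return -EIO;	/* over max size or max segments limit */

		/* ...hand rq to the lower device's queue... */
		return 0;
	}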

block/blk-map.c

Lines changed: 2 additions & 2 deletions
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	struct bio *bio;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;

block/blk-merge.c

Lines changed: 14 additions & 13 deletions
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */

block/blk-settings.c

Lines changed: 12 additions & 3 deletions
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+	else
+		q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	--q->max_hw_segments;
-	--q->max_phys_segments;
+	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
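A usage note on the new setter: as the first hunk above shows, blk_queue_max_hw_sectors() never publishes a value below BLK_DEF_MAX_SECTORS; smaller values are raised to the default. A hedged sketch of a driver calling it at probe time — the function name and the 2048-sector figure are hypothetical:

	/*
	 * Hypothetical driver init: advertise a 1 MB (2048-sector) per-request
	 * hardware cap. Anything below BLK_DEF_MAX_SECTORS would be raised to
	 * the default by the setter, per the hunk above.
	 */
	static void example_init_limits(struct request_queue *q)
	{
		blk_queue_max_hw_sectors(q, 2048);
	}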

block/blk-sysfs.c

Lines changed: 4 additions & 4 deletions
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-	int max_sectors_kb = q->max_sectors >> 1;
+	int max_sectors_kb = queue_max_sectors(q) >> 1;
 
 	return queue_var_show(max_sectors_kb, (page));
 }
@@ -109,23 +109,23 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
-		max_hw_sectors_kb = q->max_hw_sectors >> 1,
+		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
 			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	q->max_sectors = max_sectors_kb << 1;
+	blk_queue_max_sectors(q, max_sectors_kb << 1);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
 	return queue_var_show(max_hw_sectors_kb, (page));
 }

block/compat_ioctl.c

Lines changed: 1 addition & 1 deletion
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
-					 bdev_get_queue(bdev)->max_sectors);
+					 queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))

block/ioctl.c

Lines changed: 5 additions & 5 deletions
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 		bio->bi_private = &wait;
 		bio->bi_sector = start;
 
-		if (len > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			len -= q->max_hw_sectors;
-			start += q->max_hw_sectors;
+		if (len > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			len -= queue_max_hw_sectors(q);
+			start += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = len << 9;
 			len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKSSZGET: /* get block device hardware sector size */
 		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
-		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))

block/scsi_ioctl.c

Lines changed: 4 additions & 4 deletions
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 
 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
 
 	return put_user(val, p);
 }
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 
 	if (size < 0)
 		return -EINVAL;
-	if (size > (q->max_sectors << 9))
-		size = q->max_sectors << 9;
+	if (size > (queue_max_sectors(q) << 9))
+		size = queue_max_sectors(q) << 9;
 
 	q->sg_reserved_size = size;
 	return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->cmd_len > BLK_MAX_CDB)
 		return -EINVAL;
 
-	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)

drivers/block/pktcdvd.c

Lines changed: 4 additions & 2 deletions
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  */
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
-	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+	if ((pd->settings.size << 9) / CD_FRAMESIZE
+	    <= queue_max_phys_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
-	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+	} else if ((pd->settings.size << 9) / PAGE_SIZE
+		   <= queue_max_phys_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations

drivers/cdrom/cdrom.c

Lines changed: 2 additions & 2 deletions
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	nr = nframes;
 	if (cdi->cdda_method == CDDA_BPC_SINGLE)
 		nr = 1;
-	if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-		nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+	if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+		nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
 
 	len = nr * CD_FRAMESIZE_RAW;

drivers/md/dm-table.c

Lines changed: 14 additions & 14 deletions
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
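The stacking logic in dm_set_device_limits() leans on min_not_zero(), which picks the smaller of two limits while treating zero as "not set". Roughly — the real macro lives in the kernel headers, and this expansion is an assumption for illustration:

	/* Take the minimum of l and r, ignoring whichever is zero/unset. */
	#define min_not_zero(l, r)	((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

This way a dm table ends up with the strictest limit any underlying device actually declared, and a device that never set a limit does not clamp the whole stack to zero.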

drivers/md/linear.c

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	 * a one page request is never in violation.
 	 */
 	if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-	    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+	    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 	disk->num_sectors = rdev->sectors;
