Skip to content

Commit 025146e

Browse files
martinkpetersen authored and Jens Axboe committed
block: Move queue limits to an embedded struct
To accommodate stacking drivers that do not have an associated request queue we're moving the limits to a separate, embedded structure.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
1 parent ae03bf6 commit 025146e

File tree

2 files changed

+60
-39
lines changed

2 files changed

+60
-39
lines changed

block/blk-settings.c

Lines changed: 34 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
179179
*/
180180
if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
181181
dma = 1;
182-
q->bounce_pfn = max_low_pfn;
182+
q->limits.bounce_pfn = max_low_pfn;
183183
#else
184184
if (b_pfn < blk_max_low_pfn)
185185
dma = 1;
186-
q->bounce_pfn = b_pfn;
186+
q->limits.bounce_pfn = b_pfn;
187187
#endif
188188
if (dma) {
189189
init_emergency_isa_pool();
190190
q->bounce_gfp = GFP_NOIO | GFP_DMA;
191-
q->bounce_pfn = b_pfn;
191+
q->limits.bounce_pfn = b_pfn;
192192
}
193193
}
194194
EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -211,20 +211,20 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
211211
}
212212

213213
if (BLK_DEF_MAX_SECTORS > max_sectors)
214-
q->max_hw_sectors = q->max_sectors = max_sectors;
214+
q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
215215
else {
216-
q->max_sectors = BLK_DEF_MAX_SECTORS;
217-
q->max_hw_sectors = max_sectors;
216+
q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
217+
q->limits.max_hw_sectors = max_sectors;
218218
}
219219
}
220220
EXPORT_SYMBOL(blk_queue_max_sectors);
221221

222222
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
223223
{
224224
if (BLK_DEF_MAX_SECTORS > max_sectors)
225-
q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
225+
q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
226226
else
227-
q->max_hw_sectors = max_sectors;
227+
q->limits.max_hw_sectors = max_sectors;
228228
}
229229
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
230230

@@ -247,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
247247
__func__, max_segments);
248248
}
249249

250-
q->max_phys_segments = max_segments;
250+
q->limits.max_phys_segments = max_segments;
251251
}
252252
EXPORT_SYMBOL(blk_queue_max_phys_segments);
253253

@@ -271,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
271271
__func__, max_segments);
272272
}
273273

274-
q->max_hw_segments = max_segments;
274+
q->limits.max_hw_segments = max_segments;
275275
}
276276
EXPORT_SYMBOL(blk_queue_max_hw_segments);
277277

@@ -292,7 +292,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
292292
__func__, max_size);
293293
}
294294

295-
q->max_segment_size = max_size;
295+
q->limits.max_segment_size = max_size;
296296
}
297297
EXPORT_SYMBOL(blk_queue_max_segment_size);
298298

@@ -308,7 +308,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
308308
**/
309309
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
310310
{
311-
q->logical_block_size = size;
311+
q->limits.logical_block_size = size;
312312
}
313313
EXPORT_SYMBOL(blk_queue_logical_block_size);
314314

@@ -325,14 +325,27 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
325325
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
326326
{
327327
/* zero is "infinity" */
328-
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
329-
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
330-
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
331-
332-
t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
333-
t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
334-
t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
335-
t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
328+
t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
329+
queue_max_sectors(b));
330+
331+
t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
332+
queue_max_hw_sectors(b));
333+
334+
t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
335+
queue_segment_boundary(b));
336+
337+
t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
338+
queue_max_phys_segments(b));
339+
340+
t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
341+
queue_max_hw_segments(b));
342+
343+
t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
344+
queue_max_segment_size(b));
345+
346+
t->limits.logical_block_size = max(queue_logical_block_size(t),
347+
queue_logical_block_size(b));
348+
336349
if (!t->queue_lock)
337350
WARN_ON_ONCE(1);
338351
else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@@ -430,7 +443,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
430443
__func__, mask);
431444
}
432445

433-
q->seg_boundary_mask = mask;
446+
q->limits.seg_boundary_mask = mask;
434447
}
435448
EXPORT_SYMBOL(blk_queue_segment_boundary);
436449

include/linux/blkdev.h

Lines changed: 26 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,21 @@ struct blk_cmd_filter {
307307
struct kobject kobj;
308308
};
309309

310+
struct queue_limits {
311+
unsigned long bounce_pfn;
312+
unsigned long seg_boundary_mask;
313+
314+
unsigned int max_hw_sectors;
315+
unsigned int max_sectors;
316+
unsigned int max_segment_size;
317+
318+
unsigned short logical_block_size;
319+
unsigned short max_hw_segments;
320+
unsigned short max_phys_segments;
321+
322+
unsigned char no_cluster;
323+
};
324+
310325
struct request_queue
311326
{
312327
/*
@@ -358,7 +373,6 @@ struct request_queue
358373
/*
359374
* queue needs bounce pages for pages above this limit
360375
*/
361-
unsigned long bounce_pfn;
362376
gfp_t bounce_gfp;
363377

364378
/*
@@ -387,14 +401,6 @@ struct request_queue
387401
unsigned int nr_congestion_off;
388402
unsigned int nr_batching;
389403

390-
unsigned int max_sectors;
391-
unsigned int max_hw_sectors;
392-
unsigned short max_phys_segments;
393-
unsigned short max_hw_segments;
394-
unsigned short logical_block_size;
395-
unsigned int max_segment_size;
396-
397-
unsigned long seg_boundary_mask;
398404
void *dma_drain_buffer;
399405
unsigned int dma_drain_size;
400406
unsigned int dma_pad_mask;
@@ -410,6 +416,8 @@ struct request_queue
410416
struct timer_list timeout;
411417
struct list_head timeout_list;
412418

419+
struct queue_limits limits;
420+
413421
/*
414422
* sg stuff
415423
*/
@@ -991,45 +999,45 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
991999

9921000
static inline unsigned long queue_bounce_pfn(struct request_queue *q)
9931001
{
994-
return q->bounce_pfn;
1002+
return q->limits.bounce_pfn;
9951003
}
9961004

9971005
static inline unsigned long queue_segment_boundary(struct request_queue *q)
9981006
{
999-
return q->seg_boundary_mask;
1007+
return q->limits.seg_boundary_mask;
10001008
}
10011009

10021010
static inline unsigned int queue_max_sectors(struct request_queue *q)
10031011
{
1004-
return q->max_sectors;
1012+
return q->limits.max_sectors;
10051013
}
10061014

10071015
static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
10081016
{
1009-
return q->max_hw_sectors;
1017+
return q->limits.max_hw_sectors;
10101018
}
10111019

10121020
static inline unsigned short queue_max_hw_segments(struct request_queue *q)
10131021
{
1014-
return q->max_hw_segments;
1022+
return q->limits.max_hw_segments;
10151023
}
10161024

10171025
static inline unsigned short queue_max_phys_segments(struct request_queue *q)
10181026
{
1019-
return q->max_phys_segments;
1027+
return q->limits.max_phys_segments;
10201028
}
10211029

10221030
static inline unsigned int queue_max_segment_size(struct request_queue *q)
10231031
{
1024-
return q->max_segment_size;
1032+
return q->limits.max_segment_size;
10251033
}
10261034

10271035
static inline unsigned short queue_logical_block_size(struct request_queue *q)
10281036
{
10291037
int retval = 512;
10301038

1031-
if (q && q->logical_block_size)
1032-
retval = q->logical_block_size;
1039+
if (q && q->limits.logical_block_size)
1040+
retval = q->limits.logical_block_size;
10331041

10341042
return retval;
10351043
}

0 commit comments

Comments (0)