Commit 6a15674

KAGA-KOKO authored and axboe committed
block: Introduce blk_get_request_flags()
A side effect of this patch is that the GFP mask that is passed to several
allocation functions in the legacy block layer is changed from GFP_KERNEL
into __GFP_DIRECT_RECLAIM.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 055f6e1 commit 6a15674
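The core of the change is the translation from BLK_MQ_REQ_* flags to a GFP
mask, which the legacy-path allocators in the diff below open-code. A minimal
sketch of that translation (the helper name is illustrative, not part of the
patch):

	/* Illustrative helper, not in the patch: __get_request() and
	 * blk_old_get_request() open-code this translation. NOWAIT callers
	 * must not sleep, so they get GFP_ATOMIC; all other callers may
	 * enter direct reclaim, which is how the previous GFP_KERNEL turns
	 * into __GFP_DIRECT_RECLAIM. */
	static inline gfp_t blk_flags_to_gfp(unsigned int flags)
	{
		return (flags & BLK_MQ_REQ_NOWAIT) ? GFP_ATOMIC :
						     __GFP_DIRECT_RECLAIM;
	}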

2 files changed, 38 insertions(+), 15 deletions(-)

block/blk-core.c

Lines changed: 35 additions & 15 deletions
@@ -1160,7 +1160,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * @rl: request list to allocate from
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLQ_MQ_REQ_* flags
  *
  * Get a free request from @q. This function may fail under memory
  * pressure or if @q is dead.
@@ -1170,7 +1170,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, unsigned int op,
-		struct bio *bio, gfp_t gfp_mask)
+		struct bio *bio, unsigned int flags)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
@@ -1179,6 +1179,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	struct io_cq *icq = NULL;
 	const bool is_sync = op_is_sync(op);
 	int may_queue;
+	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+			 __GFP_DIRECT_RECLAIM;
 	req_flags_t rq_flags = RQF_ALLOCED;
 
 	lockdep_assert_held(q->queue_lock);
@@ -1339,7 +1341,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * @q: request_queue to allocate request from
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLK_MQ_REQ_* flags.
  *
  * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
  * this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1349,7 +1351,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, unsigned int op,
-		struct bio *bio, gfp_t gfp_mask)
+		struct bio *bio, unsigned int flags)
 {
 	const bool is_sync = op_is_sync(op);
 	DEFINE_WAIT(wait);
@@ -1361,7 +1363,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, op, bio, gfp_mask);
+	rq = __get_request(rl, op, bio, flags);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1370,7 +1372,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 		return ERR_PTR(-EAGAIN);
 	}
 
-	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
+	if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return rq;
 	}
@@ -1397,10 +1399,13 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	goto retry;
 }
 
+/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
 static struct request *blk_old_get_request(struct request_queue *q,
-				unsigned int op, gfp_t gfp_mask)
+				unsigned int op, unsigned int flags)
 {
 	struct request *rq;
+	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+			 __GFP_DIRECT_RECLAIM;
 	int ret = 0;
 
 	WARN_ON_ONCE(q->mq_ops);
@@ -1413,7 +1418,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	if (ret)
 		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, op, NULL, gfp_mask);
+	rq = get_request(q, op, NULL, flags);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		blk_queue_exit(q);
@@ -1427,25 +1432,40 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-				gfp_t gfp_mask)
+/**
+ * blk_get_request_flags - allocate a request
+ * @q: request queue to allocate a request for
+ * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
+ * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
+ */
+struct request *blk_get_request_flags(struct request_queue *q, unsigned int op,
+				      unsigned int flags)
 {
 	struct request *req;
 
+	WARN_ON_ONCE(op & REQ_NOWAIT);
+	WARN_ON_ONCE(flags & ~BLK_MQ_REQ_NOWAIT);
+
 	if (q->mq_ops) {
-		req = blk_mq_alloc_request(q, op,
-			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
-			0 : BLK_MQ_REQ_NOWAIT);
+		req = blk_mq_alloc_request(q, op, flags);
 		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
 			q->mq_ops->initialize_rq_fn(req);
 	} else {
-		req = blk_old_get_request(q, op, gfp_mask);
+		req = blk_old_get_request(q, op, flags);
 		if (!IS_ERR(req) && q->initialize_rq_fn)
 			q->initialize_rq_fn(req);
 	}
 
 	return req;
 }
+EXPORT_SYMBOL(blk_get_request_flags);
+
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+				gfp_t gfp_mask)
+{
+	return blk_get_request_flags(q, op, gfp_mask & __GFP_DIRECT_RECLAIM ?
+				     0 : BLK_MQ_REQ_NOWAIT);
+}
 EXPORT_SYMBOL(blk_get_request);
 
 /**
@@ -1871,7 +1891,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Returns with the queue unlocked.
 	 */
 	blk_queue_enter_live(q);
-	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
+	req = get_request(q, bio->bi_opf, bio, 0);
 	if (IS_ERR(req)) {
 		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);

include/linux/blkdev.h

Lines changed: 3 additions & 0 deletions
@@ -927,6 +927,9 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
+extern struct request *blk_get_request_flags(struct request_queue *,
+					     unsigned int op,
+					     unsigned int flags);
 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
 				       gfp_t gfp_mask);
 extern void blk_requeue_request(struct request_queue *, struct request *);
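With the declaration above in place, a hedged caller-side sketch of the two
interfaces after this commit (the queue pointer q and the REQ_OP_DRV_IN
opcode are illustrative assumptions, not taken from the patch):

	struct request *rq;

	/* New interface: pass BLK_MQ_REQ_* flags directly; NOWAIT means
	 * fail with an ERR_PTR() instead of sleeping for a free request. */
	rq = blk_get_request_flags(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* ... set up and issue rq ... */
	blk_put_request(rq);

	/* Old interface, unchanged for callers: the gfp_t is mapped onto
	 * the new flags (blocking GFP mask -> 0, otherwise NOWAIT). */
	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);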
