@@ -1160,7 +1160,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
1160
1160
* @rl: request list to allocate from
1161
1161
* @op: operation and flags
1162
1162
* @bio: bio to allocate request for (can be %NULL)
1163
- * @gfp_mask: allocation mask
1163
+ * @flags: BLK_MQ_REQ_* flags
1164
1164
*
1165
1165
* Get a free request from @q. This function may fail under memory
1166
1166
* pressure or if @q is dead.
@@ -1170,7 +1170,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
1170
1170
* Returns request pointer on success, with @q->queue_lock *not held*.
1171
1171
*/
1172
1172
static struct request * __get_request (struct request_list * rl , unsigned int op ,
1173
- struct bio * bio , gfp_t gfp_mask )
1173
+ struct bio * bio , unsigned int flags )
1174
1174
{
1175
1175
struct request_queue * q = rl -> q ;
1176
1176
struct request * rq ;
@@ -1179,6 +1179,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
1179
1179
struct io_cq * icq = NULL ;
1180
1180
const bool is_sync = op_is_sync (op );
1181
1181
int may_queue ;
1182
+ gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
1183
+ __GFP_DIRECT_RECLAIM ;
1182
1184
req_flags_t rq_flags = RQF_ALLOCED ;
1183
1185
1184
1186
lockdep_assert_held (q -> queue_lock );
@@ -1339,7 +1341,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
1339
1341
* @q: request_queue to allocate request from
1340
1342
* @op: operation and flags
1341
1343
* @bio: bio to allocate request for (can be %NULL)
1342
- * @gfp_mask: allocation mask
1344
+ * @flags: BLK_MQ_REQ_* flags.
1343
1345
*
1344
1346
* Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is not set in @flags,
1345
1347
* this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1349,7 +1351,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
1349
1351
* Returns request pointer on success, with @q->queue_lock *not held*.
1350
1352
*/
1351
1353
static struct request * get_request (struct request_queue * q , unsigned int op ,
1352
- struct bio * bio , gfp_t gfp_mask )
1354
+ struct bio * bio , unsigned int flags )
1353
1355
{
1354
1356
const bool is_sync = op_is_sync (op );
1355
1357
DEFINE_WAIT (wait );
@@ -1361,7 +1363,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
1361
1363
1362
1364
rl = blk_get_rl (q , bio ); /* transferred to @rq on success */
1363
1365
retry :
1364
- rq = __get_request (rl , op , bio , gfp_mask );
1366
+ rq = __get_request (rl , op , bio , flags );
1365
1367
if (!IS_ERR (rq ))
1366
1368
return rq ;
1367
1369
@@ -1370,7 +1372,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
1370
1372
return ERR_PTR (- EAGAIN );
1371
1373
}
1372
1374
1373
- if (! gfpflags_allow_blocking ( gfp_mask ) || unlikely (blk_queue_dying (q ))) {
1375
+ if (( flags & BLK_MQ_REQ_NOWAIT ) || unlikely (blk_queue_dying (q ))) {
1374
1376
blk_put_rl (rl );
1375
1377
return rq ;
1376
1378
}
@@ -1397,10 +1399,13 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
1397
1399
goto retry ;
1398
1400
}
1399
1401
1402
+ /* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
1400
1403
static struct request * blk_old_get_request (struct request_queue * q ,
1401
- unsigned int op , gfp_t gfp_mask )
1404
+ unsigned int op , unsigned int flags )
1402
1405
{
1403
1406
struct request * rq ;
1407
+ gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
1408
+ __GFP_DIRECT_RECLAIM ;
1404
1409
int ret = 0 ;
1405
1410
1406
1411
WARN_ON_ONCE (q -> mq_ops );
@@ -1413,7 +1418,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
1413
1418
if (ret )
1414
1419
return ERR_PTR (ret );
1415
1420
spin_lock_irq (q -> queue_lock );
1416
- rq = get_request (q , op , NULL , gfp_mask );
1421
+ rq = get_request (q , op , NULL , flags );
1417
1422
if (IS_ERR (rq )) {
1418
1423
spin_unlock_irq (q -> queue_lock );
1419
1424
blk_queue_exit (q );
@@ -1427,25 +1432,40 @@ static struct request *blk_old_get_request(struct request_queue *q,
1427
1432
return rq ;
1428
1433
}
1429
1434
1430
- struct request * blk_get_request (struct request_queue * q , unsigned int op ,
1431
- gfp_t gfp_mask )
1435
+ /**
1436
+ * blk_get_request_flags - allocate a request
1437
+ * @q: request queue to allocate a request for
1438
+ * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
1439
+ * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
1440
+ */
1441
+ struct request * blk_get_request_flags (struct request_queue * q , unsigned int op ,
1442
+ unsigned int flags )
1432
1443
{
1433
1444
struct request * req ;
1434
1445
1446
+ WARN_ON_ONCE (op & REQ_NOWAIT );
1447
+ WARN_ON_ONCE (flags & ~BLK_MQ_REQ_NOWAIT );
1448
+
1435
1449
if (q -> mq_ops ) {
1436
- req = blk_mq_alloc_request (q , op ,
1437
- (gfp_mask & __GFP_DIRECT_RECLAIM ) ?
1438
- 0 : BLK_MQ_REQ_NOWAIT );
1450
+ req = blk_mq_alloc_request (q , op , flags );
1439
1451
if (!IS_ERR (req ) && q -> mq_ops -> initialize_rq_fn )
1440
1452
q -> mq_ops -> initialize_rq_fn (req );
1441
1453
} else {
1442
- req = blk_old_get_request (q , op , gfp_mask );
1454
+ req = blk_old_get_request (q , op , flags );
1443
1455
if (!IS_ERR (req ) && q -> initialize_rq_fn )
1444
1456
q -> initialize_rq_fn (req );
1445
1457
}
1446
1458
1447
1459
return req ;
1448
1460
}
1461
+ EXPORT_SYMBOL (blk_get_request_flags );
1462
+
1463
+ struct request * blk_get_request (struct request_queue * q , unsigned int op ,
1464
+ gfp_t gfp_mask )
1465
+ {
1466
+ return blk_get_request_flags (q , op , gfp_mask & __GFP_DIRECT_RECLAIM ?
1467
+ 0 : BLK_MQ_REQ_NOWAIT );
1468
+ }
1449
1469
EXPORT_SYMBOL (blk_get_request );
1450
1470
1451
1471
/**
@@ -1871,7 +1891,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1871
1891
* Returns with the queue unlocked.
1872
1892
*/
1873
1893
blk_queue_enter_live (q );
1874
- req = get_request (q , bio -> bi_opf , bio , GFP_NOIO );
1894
+ req = get_request (q , bio -> bi_opf , bio , 0 );
1875
1895
if (IS_ERR (req )) {
1876
1896
blk_queue_exit (q );
1877
1897
__wbt_done (q -> rq_wb , wb_acct );
0 commit comments