@@ -14,6 +14,7 @@
 #include <linux/hrtimer.h>
 #include <linux/configfs.h>
 #include <linux/badblocks.h>
+#include <linux/fault-inject.h>
 
 #define SECTOR_SHIFT 9
 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -26,6 +27,8 @@
 #define TICKS_PER_SEC 50ULL
 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
 
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+
 static inline u64 mb_per_tick(int mbps)
 {
 	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -162,6 +165,9 @@ static int g_home_node = NUMA_NO_NODE;
 module_param_named(home_node, g_home_node, int, S_IRUGO);
 MODULE_PARM_DESC(home_node, "Home node for the device");
 
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+
 static int g_queue_mode = NULL_Q_MQ;
 
 static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -1364,16 +1370,26 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
 	return BLKPREP_DEFER;
 }
 
+static bool should_timeout_request(struct request *rq)
+{
+	if (g_timeout_str[0])
+		return should_fail(&null_timeout_attr, 1);
+
+	return false;
+}
+
 static void null_request_fn(struct request_queue *q)
 {
 	struct request *rq;
 
 	while ((rq = blk_fetch_request(q)) != NULL) {
 		struct nullb_cmd *cmd = rq->special;
 
-		spin_unlock_irq(q->queue_lock);
-		null_handle_cmd(cmd);
-		spin_lock_irq(q->queue_lock);
+		if (!should_timeout_request(rq)) {
+			spin_unlock_irq(q->queue_lock);
+			null_handle_cmd(cmd);
+			spin_lock_irq(q->queue_lock);
+		}
 	}
 }
 
@@ -1400,7 +1416,10 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(bd->rq);
 
-	return null_handle_cmd(cmd);
+	if (!should_timeout_request(bd->rq))
+		return null_handle_cmd(cmd);
+
+	return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops null_mq_ops = {
@@ -1634,6 +1653,18 @@ static void null_validate_conf(struct nullb_device *dev)
 		dev->mbps = 0;
 }
 
+static bool null_setup_fault(void)
+{
+	if (!g_timeout_str[0])
+		return true;
+
+	if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+		return false;
+
+	null_timeout_attr.verbose = 0;
+	return true;
+}
+
 static int null_add_dev(struct nullb_device *dev)
 {
 	struct nullb *nullb;
@@ -1667,6 +1698,9 @@ static int null_add_dev(struct nullb_device *dev)
 	if (rv)
 		goto out_cleanup_queues;
 
+	if (!null_setup_fault())
+		goto out_cleanup_queues;
+
 	nullb->tag_set->timeout = 5 * HZ;
 	nullb->q = blk_mq_init_queue(nullb->tag_set);
 	if (IS_ERR(nullb->q)) {
@@ -1691,6 +1725,10 @@ static int null_add_dev(struct nullb_device *dev)
 		rv = -ENOMEM;
 		goto out_cleanup_queues;
 	}
+
+	if (!null_setup_fault())
+		goto out_cleanup_blk_queue;
+
 	blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 	blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
 	blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
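
Usage note (a minimal sketch; the concrete values below are illustrative and not taken from this commit): the new timeout module parameter string is handed to setup_fault_attr(), so it uses the generic fault-injection attribute format "<interval>,<probability>,<space>,<times>". Loading the driver with, for example,

    modprobe null_blk timeout="1,50,0,-1"

would make should_fail() flag roughly half of all requests with no limit on how many times it fires. A flagged request is simply never completed by the driver; the 5-second timeout installed on the blk-mq tag set, or blk_queue_rq_timed_out() on the legacy queue path (both visible in the hunks above), then expires and exercises the block layer's timeout handling.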