8
8
#include <linux/slab.h>
9
9
#include <linux/blk-mq.h>
10
10
#include <linux/hrtimer.h>
11
+ #include <linux/lightnvm.h>
11
12
12
13
struct nullb_cmd {
13
14
struct list_head list ;
@@ -39,6 +40,7 @@ struct nullb {
39
40
40
41
struct nullb_queue * queues ;
41
42
unsigned int nr_queues ;
43
+ char disk_name [DISK_NAME_LEN ];
42
44
};
43
45
44
46
static LIST_HEAD (nullb_list );
@@ -119,6 +121,10 @@ static int nr_devices = 2;
119
121
module_param (nr_devices , int , S_IRUGO );
120
122
MODULE_PARM_DESC (nr_devices , "Number of devices to register" );
121
123
124
+ static bool use_lightnvm ;
125
+ module_param (use_lightnvm , bool , S_IRUGO );
126
+ MODULE_PARM_DESC (use_lightnvm , "Register as a LightNVM device" );
127
+
122
128
static int irqmode = NULL_IRQ_SOFTIRQ ;
123
129
124
130
static int null_set_irqmode (const char * str , const struct kernel_param * kp )
@@ -427,6 +433,8 @@ static void null_del_dev(struct nullb *nullb)
427
433
{
428
434
list_del_init (& nullb -> list );
429
435
436
+ if (use_lightnvm )
437
+ nvm_unregister (nullb -> disk -> disk_name );
430
438
del_gendisk (nullb -> disk );
431
439
blk_cleanup_queue (nullb -> q );
432
440
if (queue_mode == NULL_Q_MQ )
@@ -436,6 +444,125 @@ static void null_del_dev(struct nullb *nullb)
436
444
kfree (nullb );
437
445
}
438
446
447
+ #ifdef CONFIG_NVM
448
+
449
+ static void null_lnvm_end_io (struct request * rq , int error )
450
+ {
451
+ struct nvm_rq * rqd = rq -> end_io_data ;
452
+ struct nvm_dev * dev = rqd -> dev ;
453
+
454
+ dev -> mt -> end_io (rqd , error );
455
+
456
+ blk_put_request (rq );
457
+ }
458
+
459
+ static int null_lnvm_submit_io (struct request_queue * q , struct nvm_rq * rqd )
460
+ {
461
+ struct request * rq ;
462
+ struct bio * bio = rqd -> bio ;
463
+
464
+ rq = blk_mq_alloc_request (q , bio_rw (bio ), GFP_KERNEL , 0 );
465
+ if (IS_ERR (rq ))
466
+ return - ENOMEM ;
467
+
468
+ rq -> cmd_type = REQ_TYPE_DRV_PRIV ;
469
+ rq -> __sector = bio -> bi_iter .bi_sector ;
470
+ rq -> ioprio = bio_prio (bio );
471
+
472
+ if (bio_has_data (bio ))
473
+ rq -> nr_phys_segments = bio_phys_segments (q , bio );
474
+
475
+ rq -> __data_len = bio -> bi_iter .bi_size ;
476
+ rq -> bio = rq -> biotail = bio ;
477
+
478
+ rq -> end_io_data = rqd ;
479
+
480
+ blk_execute_rq_nowait (q , NULL , rq , 0 , null_lnvm_end_io );
481
+
482
+ return 0 ;
483
+ }
484
+
485
+ static int null_lnvm_id (struct request_queue * q , struct nvm_id * id )
486
+ {
487
+ sector_t size = gb * 1024 * 1024 * 1024ULL ;
488
+ struct nvm_id_group * grp ;
489
+
490
+ id -> ver_id = 0x1 ;
491
+ id -> vmnt = 0 ;
492
+ id -> cgrps = 1 ;
493
+ id -> cap = 0x3 ;
494
+ id -> dom = 0x1 ;
495
+ id -> ppat = NVM_ADDRMODE_LINEAR ;
496
+
497
+ do_div (size , bs ); /* convert size to pages */
498
+ grp = & id -> groups [0 ];
499
+ grp -> mtype = 0 ;
500
+ grp -> fmtype = 1 ;
501
+ grp -> num_ch = 1 ;
502
+ grp -> num_lun = 1 ;
503
+ grp -> num_pln = 1 ;
504
+ grp -> num_blk = size / 256 ;
505
+ grp -> num_pg = 256 ;
506
+ grp -> fpg_sz = bs ;
507
+ grp -> csecs = bs ;
508
+ grp -> trdt = 25000 ;
509
+ grp -> trdm = 25000 ;
510
+ grp -> tprt = 500000 ;
511
+ grp -> tprm = 500000 ;
512
+ grp -> tbet = 1500000 ;
513
+ grp -> tbem = 1500000 ;
514
+ grp -> mpos = 0x010101 ; /* single plane rwe */
515
+ grp -> cpar = hw_queue_depth ;
516
+
517
+ return 0 ;
518
+ }
519
+
520
+ static void * null_lnvm_create_dma_pool (struct request_queue * q , char * name )
521
+ {
522
+ mempool_t * virtmem_pool ;
523
+
524
+ virtmem_pool = mempool_create_page_pool (64 , 0 );
525
+ if (!virtmem_pool ) {
526
+ pr_err ("null_blk: Unable to create virtual memory pool\n" );
527
+ return NULL ;
528
+ }
529
+
530
+ return virtmem_pool ;
531
+ }
532
+
533
/* Tear down the pool created by null_lnvm_create_dma_pool(). */
static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}
537
+
538
+ static void * null_lnvm_dev_dma_alloc (struct request_queue * q , void * pool ,
539
+ gfp_t mem_flags , dma_addr_t * dma_handler )
540
+ {
541
+ return mempool_alloc (pool , mem_flags );
542
+ }
543
+
544
+ static void null_lnvm_dev_dma_free (void * pool , void * entry ,
545
+ dma_addr_t dma_handler )
546
+ {
547
+ mempool_free (entry , pool );
548
+ }
549
+
550
+ static struct nvm_dev_ops null_lnvm_dev_ops = {
551
+ .identity = null_lnvm_id ,
552
+ .submit_io = null_lnvm_submit_io ,
553
+
554
+ .create_dma_pool = null_lnvm_create_dma_pool ,
555
+ .destroy_dma_pool = null_lnvm_destroy_dma_pool ,
556
+ .dev_dma_alloc = null_lnvm_dev_dma_alloc ,
557
+ .dev_dma_free = null_lnvm_dev_dma_free ,
558
+
559
+ /* Simulate nvme protocol restriction */
560
+ .max_phys_sect = 64 ,
561
+ };
562
+ #else
563
+ static struct nvm_dev_ops null_lnvm_dev_ops ;
564
+ #endif /* CONFIG_NVM */
565
+
439
566
static int null_open (struct block_device * bdev , fmode_t mode )
440
567
{
441
568
return 0 ;
@@ -575,11 +702,6 @@ static int null_add_dev(void)
575
702
queue_flag_set_unlocked (QUEUE_FLAG_NONROT , nullb -> q );
576
703
queue_flag_clear_unlocked (QUEUE_FLAG_ADD_RANDOM , nullb -> q );
577
704
578
- disk = nullb -> disk = alloc_disk_node (1 , home_node );
579
- if (!disk ) {
580
- rv = - ENOMEM ;
581
- goto out_cleanup_blk_queue ;
582
- }
583
705
584
706
mutex_lock (& lock );
585
707
list_add_tail (& nullb -> list , & nullb_list );
@@ -589,6 +711,21 @@ static int null_add_dev(void)
589
711
blk_queue_logical_block_size (nullb -> q , bs );
590
712
blk_queue_physical_block_size (nullb -> q , bs );
591
713
714
+ sprintf (nullb -> disk_name , "nullb%d" , nullb -> index );
715
+
716
+ if (use_lightnvm ) {
717
+ rv = nvm_register (nullb -> q , nullb -> disk_name ,
718
+ & null_lnvm_dev_ops );
719
+ if (rv )
720
+ goto out_cleanup_blk_queue ;
721
+ goto done ;
722
+ }
723
+
724
+ disk = nullb -> disk = alloc_disk_node (1 , home_node );
725
+ if (!disk ) {
726
+ rv = - ENOMEM ;
727
+ goto out_cleanup_lightnvm ;
728
+ }
592
729
size = gb * 1024 * 1024 * 1024ULL ;
593
730
set_capacity (disk , size >> 9 );
594
731
@@ -598,10 +735,15 @@ static int null_add_dev(void)
598
735
disk -> fops = & null_fops ;
599
736
disk -> private_data = nullb ;
600
737
disk -> queue = nullb -> q ;
601
- sprintf (disk -> disk_name , "nullb%d" , nullb -> index );
738
+ strncpy (disk -> disk_name , nullb -> disk_name , DISK_NAME_LEN );
739
+
602
740
add_disk (disk );
741
+ done :
603
742
return 0 ;
604
743
744
+ out_cleanup_lightnvm :
745
+ if (use_lightnvm )
746
+ nvm_unregister (nullb -> disk_name );
605
747
out_cleanup_blk_queue :
606
748
blk_cleanup_queue (nullb -> q );
607
749
out_cleanup_tags :
@@ -625,6 +767,12 @@ static int __init null_init(void)
625
767
bs = PAGE_SIZE ;
626
768
}
627
769
770
+ if (use_lightnvm && queue_mode != NULL_Q_MQ ) {
771
+ pr_warn ("null_blk: LightNVM only supported for blk-mq\n" );
772
+ pr_warn ("null_blk: defaults queue mode to blk-mq\n" );
773
+ queue_mode = NULL_Q_MQ ;
774
+ }
775
+
628
776
if (queue_mode == NULL_Q_MQ && use_per_node_hctx ) {
629
777
if (submit_queues < nr_online_nodes ) {
630
778
pr_warn ("null_blk: submit_queues param is set to %u." ,
0 commit comments