31
31
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -66,43 +66,44 @@ static DEFINE_SPINLOCK(z2ram_lock);
66
66
67
67
static struct gendisk * z2ram_gendisk ;
68
68
69
- static void do_z2_request (struct request_queue * q )
69
+ static blk_status_t z2_queue_rq (struct blk_mq_hw_ctx * hctx ,
70
+ const struct blk_mq_queue_data * bd )
70
71
{
71
- struct request * req ;
72
-
73
- req = blk_fetch_request (q );
74
- while (req ) {
75
- unsigned long start = blk_rq_pos (req ) << 9 ;
76
- unsigned long len = blk_rq_cur_bytes (req );
77
- blk_status_t err = BLK_STS_OK ;
78
-
79
- if (start + len > z2ram_size ) {
80
- pr_err (DEVICE_NAME ": bad access: block=%llu, "
81
- "count=%u\n" ,
82
- (unsigned long long )blk_rq_pos (req ),
83
- blk_rq_cur_sectors (req ));
84
- err = BLK_STS_IOERR ;
85
- goto done ;
86
- }
87
- while (len ) {
88
- unsigned long addr = start & Z2RAM_CHUNKMASK ;
89
- unsigned long size = Z2RAM_CHUNKSIZE - addr ;
90
- void * buffer = bio_data (req -> bio );
91
-
92
- if (len < size )
93
- size = len ;
94
- addr += z2ram_map [ start >> Z2RAM_CHUNKSHIFT ];
95
- if (rq_data_dir (req ) == READ )
96
- memcpy (buffer , (char * )addr , size );
97
- else
98
- memcpy ((char * )addr , buffer , size );
99
- start += size ;
100
- len -= size ;
101
- }
102
- done :
103
- if (!__blk_end_request_cur (req , err ))
104
- req = blk_fetch_request (q );
72
+ struct request * req = bd -> rq ;
73
+ unsigned long start = blk_rq_pos (req ) << 9 ;
74
+ unsigned long len = blk_rq_cur_bytes (req );
75
+
76
+ blk_mq_start_request (req );
77
+
78
+ if (start + len > z2ram_size ) {
79
+ pr_err (DEVICE_NAME ": bad access: block=%llu, "
80
+ "count=%u\n" ,
81
+ (unsigned long long )blk_rq_pos (req ),
82
+ blk_rq_cur_sectors (req ));
83
+ return BLK_STS_IOERR ;
84
+ }
85
+
86
+ spin_lock_irq (& z2ram_lock );
87
+
88
+ while (len ) {
89
+ unsigned long addr = start & Z2RAM_CHUNKMASK ;
90
+ unsigned long size = Z2RAM_CHUNKSIZE - addr ;
91
+ void * buffer = bio_data (req -> bio );
92
+
93
+ if (len < size )
94
+ size = len ;
95
+ addr += z2ram_map [ start >> Z2RAM_CHUNKSHIFT ];
96
+ if (rq_data_dir (req ) == READ )
97
+ memcpy (buffer , (char * )addr , size );
98
+ else
99
+ memcpy ((char * )addr , buffer , size );
100
+ start += size ;
101
+ len -= size ;
105
102
}
103
+
104
+ spin_unlock_irq (& z2ram_lock );
105
+ blk_mq_end_request (req , BLK_STS_OK );
106
+ return BLK_STS_OK ;
106
107
}
107
108
108
109
static void
@@ -337,6 +338,11 @@ static struct kobject *z2_find(dev_t dev, int *part, void *data)
337
338
}
338
339
339
340
static struct request_queue * z2_queue ;
341
+ static struct blk_mq_tag_set tag_set ;
342
+
343
+ static const struct blk_mq_ops z2_mq_ops = {
344
+ .queue_rq = z2_queue_rq ,
345
+ };
340
346
341
347
static int __init
342
348
z2_init (void )
@@ -355,9 +361,13 @@ z2_init(void)
355
361
if (!z2ram_gendisk )
356
362
goto out_disk ;
357
363
358
- z2_queue = blk_init_queue (do_z2_request , & z2ram_lock );
359
- if (!z2_queue )
364
+ z2_queue = blk_mq_init_sq_queue (& tag_set , & z2_mq_ops , 16 ,
365
+ BLK_MQ_F_SHOULD_MERGE );
366
+ if (IS_ERR (z2_queue )) {
367
+ ret = PTR_ERR (z2_queue );
368
+ z2_queue = NULL ;
360
369
goto out_queue ;
370
+ }
361
371
362
372
z2ram_gendisk -> major = Z2RAM_MAJOR ;
363
373
z2ram_gendisk -> first_minor = 0 ;
@@ -387,6 +397,7 @@ static void __exit z2_exit(void)
387
397
del_gendisk (z2ram_gendisk );
388
398
put_disk (z2ram_gendisk );
389
399
blk_cleanup_queue (z2_queue );
400
+ blk_mq_free_tag_set (& tag_set );
390
401
391
402
if ( current_device != -1 )
392
403
{
0 commit comments