@@ -31,12 +31,11 @@
 #include <linux/cdrom.h>
 #include <linux/genhd.h>
 #include <linux/bio.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/wait.h>
-#include <linux/workqueue.h>
 #include <linux/platform_device.h>
 #include <scsi/scsi.h>
 #include <asm/io.h>
@@ -102,11 +101,6 @@ static int gdrom_major;
 static DECLARE_WAIT_QUEUE_HEAD(command_queue);
 static DECLARE_WAIT_QUEUE_HEAD(request_queue);
 
-static DEFINE_SPINLOCK(gdrom_lock);
-static void gdrom_readdisk_dma(struct work_struct *work);
-static DECLARE_WORK(work, gdrom_readdisk_dma);
-static LIST_HEAD(gdrom_deferred);
-
 struct gdromtoc {
 	unsigned int entry[99];
 	unsigned int first, last;
@@ -122,6 +116,7 @@ static struct gdrom_unit {
 	char disk_type;
 	struct gdromtoc *toc;
 	struct request_queue *gdrom_rq;
+	struct blk_mq_tag_set tag_set;
 } gd;
 
 struct gdrom_id {
@@ -584,103 +579,83 @@ static int gdrom_set_interrupt_handlers(void)
  * 9 -> sectors >> 8
  * 10 -> sectors
  */
-static void gdrom_readdisk_dma(struct work_struct *work)
+static blk_status_t gdrom_readdisk_dma(struct request *req)
 {
 	int block, block_cnt;
 	blk_status_t err;
 	struct packet_command *read_command;
-	struct list_head *elem, *next;
-	struct request *req;
 	unsigned long timeout;
 
-	if (list_empty(&gdrom_deferred))
-		return;
 	read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
 	if (!read_command)
-		return; /* get more memory later? */
+		return BLK_STS_RESOURCE;
+
 	read_command->cmd[0] = 0x30;
 	read_command->cmd[1] = 0x20;
-	spin_lock(&gdrom_lock);
-	list_for_each_safe(elem, next, &gdrom_deferred) {
-		req = list_entry(elem, struct request, queuelist);
-		spin_unlock(&gdrom_lock);
-		block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
-		block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
-		__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
-		__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
-		__raw_writel(1, GDROM_DMA_DIRECTION_REG);
-		__raw_writel(1, GDROM_DMA_ENABLE_REG);
-		read_command->cmd[2] = (block >> 16) & 0xFF;
-		read_command->cmd[3] = (block >> 8) & 0xFF;
-		read_command->cmd[4] = block & 0xFF;
-		read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
-		read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
-		read_command->cmd[10] = block_cnt & 0xFF;
-		/* set for DMA */
-		__raw_writeb(1, GDROM_ERROR_REG);
-		/* other registers */
-		__raw_writeb(0, GDROM_SECNUM_REG);
-		__raw_writeb(0, GDROM_BCL_REG);
-		__raw_writeb(0, GDROM_BCH_REG);
-		__raw_writeb(0, GDROM_DSEL_REG);
-		__raw_writeb(0, GDROM_INTSEC_REG);
-		/* Wait for registers to reset after any previous activity */
-		timeout = jiffies + HZ / 2;
-		while (gdrom_is_busy() && time_before(jiffies, timeout))
-			cpu_relax();
-		__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
-		timeout = jiffies + HZ / 2;
-		/* Wait for packet command to finish */
-		while (gdrom_is_busy() && time_before(jiffies, timeout))
-			cpu_relax();
-		gd.pending = 1;
-		gd.transfer = 1;
-		outsw(GDROM_DATA_REG, &read_command->cmd, 6);
-		timeout = jiffies + HZ / 2;
-		/* Wait for any pending DMA to finish */
-		while (__raw_readb(GDROM_DMA_STATUS_REG) &&
-			time_before(jiffies, timeout))
-			cpu_relax();
-		/* start transfer */
-		__raw_writeb(1, GDROM_DMA_STATUS_REG);
-		wait_event_interruptible_timeout(request_queue,
-			gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-		err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
-		gd.transfer = 0;
-		gd.pending = 0;
-		/* now seek to take the request spinlock
-		 * before handling ending the request */
-		spin_lock(&gdrom_lock);
-		list_del_init(&req->queuelist);
-		__blk_end_request_all(req, err);
-	}
-	spin_unlock(&gdrom_lock);
+	block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+	block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
+	__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
+	__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
+	__raw_writel(1, GDROM_DMA_DIRECTION_REG);
+	__raw_writel(1, GDROM_DMA_ENABLE_REG);
+	read_command->cmd[2] = (block >> 16) & 0xFF;
+	read_command->cmd[3] = (block >> 8) & 0xFF;
+	read_command->cmd[4] = block & 0xFF;
+	read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
+	read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
+	read_command->cmd[10] = block_cnt & 0xFF;
+	/* set for DMA */
+	__raw_writeb(1, GDROM_ERROR_REG);
+	/* other registers */
+	__raw_writeb(0, GDROM_SECNUM_REG);
+	__raw_writeb(0, GDROM_BCL_REG);
+	__raw_writeb(0, GDROM_BCH_REG);
+	__raw_writeb(0, GDROM_DSEL_REG);
+	__raw_writeb(0, GDROM_INTSEC_REG);
+	/* Wait for registers to reset after any previous activity */
+	timeout = jiffies + HZ / 2;
+	while (gdrom_is_busy() && time_before(jiffies, timeout))
+		cpu_relax();
+	__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+	timeout = jiffies + HZ / 2;
+	/* Wait for packet command to finish */
+	while (gdrom_is_busy() && time_before(jiffies, timeout))
+		cpu_relax();
+	gd.pending = 1;
+	gd.transfer = 1;
+	outsw(GDROM_DATA_REG, &read_command->cmd, 6);
+	timeout = jiffies + HZ / 2;
+	/* Wait for any pending DMA to finish */
+	while (__raw_readb(GDROM_DMA_STATUS_REG) &&
+		time_before(jiffies, timeout))
+		cpu_relax();
+	/* start transfer */
+	__raw_writeb(1, GDROM_DMA_STATUS_REG);
+	wait_event_interruptible_timeout(request_queue,
+		gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
+	err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
+	gd.transfer = 0;
+	gd.pending = 0;
+
+	blk_mq_end_request(req, err);
 	kfree(read_command);
+	return BLK_STS_OK;
 }
 
-static void gdrom_request(struct request_queue *rq)
-{
-	struct request *req;
-
-	while ((req = blk_fetch_request(rq)) != NULL) {
-		switch (req_op(req)) {
-		case REQ_OP_READ:
-			/*
-			 * Add to list of deferred work and then schedule
-			 * workqueue.
-			 */
-			list_add_tail(&req->queuelist, &gdrom_deferred);
-			schedule_work(&work);
-			break;
-		case REQ_OP_WRITE:
-			pr_notice("Read only device - write request ignored\n");
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			break;
-		default:
-			printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			break;
-		}
+static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
+				   const struct blk_mq_queue_data *bd)
+{
+	blk_mq_start_request(bd->rq);
+
+	switch (req_op(bd->rq)) {
+	case REQ_OP_READ:
+		return gdrom_readdisk_dma(bd->rq);
+	case REQ_OP_WRITE:
+		pr_notice("Read only device - write request ignored\n");
+		return BLK_STS_IOERR;
+	default:
+		printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -768,6 +743,10 @@ static int probe_gdrom_setupqueue(void)
 	return gdrom_init_dma_mode();
 }
 
+static const struct blk_mq_ops gdrom_mq_ops = {
+	.queue_rq	= gdrom_queue_rq,
+};
+
 /*
  * register this as a block device and as compliant with the
  * universal CD Rom driver interface
@@ -811,11 +790,15 @@ static int probe_gdrom(struct platform_device *devptr)
 	err = gdrom_set_interrupt_handlers();
 	if (err)
 		goto probe_fail_cmdirq_register;
-	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
-	if (!gd.gdrom_rq) {
-		err = -ENOMEM;
+
+	gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
+				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+	if (IS_ERR(gd.gdrom_rq)) {
+		err = PTR_ERR(gd.gdrom_rq);
+		gd.gdrom_rq = NULL;
 		goto probe_fail_requestq;
 	}
+
 	blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
 
 	err = probe_gdrom_setupqueue();
@@ -832,6 +815,7 @@ static int probe_gdrom(struct platform_device *devptr)
 
 probe_fail_toc:
 	blk_cleanup_queue(gd.gdrom_rq);
+	blk_mq_free_tag_set(&gd.tag_set);
 probe_fail_requestq:
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -849,8 +833,8 @@ static int probe_gdrom(struct platform_device *devptr)
 
 static int remove_gdrom(struct platform_device *devptr)
 {
-	flush_work(&work);
 	blk_cleanup_queue(gd.gdrom_rq);
+	blk_mq_free_tag_set(&gd.tag_set);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
 	del_gendisk(gd.disk);
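For reference, the shape of the conversion can be seen in isolation in the minimal sketch of a single-queue blk-mq driver below. This is a hypothetical skeleton, not gdrom code: the mydev_* names are placeholders, and it sticks to the block-layer calls that appear in the diff above (blk_mq_init_sq_queue, blk_mq_start_request, blk_mq_end_request, blk_cleanup_queue, blk_mq_free_tag_set, all as of the kernel this change targets). Because the queue is created with BLK_MQ_F_BLOCKING, ->queue_rq is allowed to sleep while it drives the hardware synchronously, which is what lets the old deferred-workqueue machinery go away.

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Placeholder driver state; gdrom keeps these in its gdrom_unit. */
static struct blk_mq_tag_set mydev_tag_set;
static struct request_queue *mydev_queue;

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);	/* mark the request in flight */

	if (req_op(req) != REQ_OP_READ)
		return BLK_STS_IOERR;	/* read-only device; core ends the request */

	/* ... program the hardware and wait for completion (may sleep) ... */

	blk_mq_end_request(req, BLK_STS_OK);	/* complete it ourselves ... */
	return BLK_STS_OK;			/* ... so report success to the core */
}

static const struct blk_mq_ops mydev_mq_ops = {
	.queue_rq	= mydev_queue_rq,
};

static int mydev_init_queue(void)
{
	int err;

	/* one hardware queue of depth 1, mirroring the gdrom setup */
	mydev_queue = blk_mq_init_sq_queue(&mydev_tag_set, &mydev_mq_ops, 1,
				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(mydev_queue)) {
		err = PTR_ERR(mydev_queue);
		mydev_queue = NULL;
		return err;
	}
	return 0;
}

static void mydev_exit_queue(void)
{
	/* teardown order matches remove_gdrom(): queue first, then tag set */
	blk_cleanup_queue(mydev_queue);
	blk_mq_free_tag_set(&mydev_tag_set);
}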