Commit ad5fc6b
gdrom: convert to blk-mq

Ditch the deferred list, lock, and workqueue handling. Just mark the set
as being blocking, so we are invoked from a workqueue already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
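For context, the shape this conversion targets is the standard blk-mq single-queue setup. Below is a minimal sketch written against the blk-mq API of this kernel generation; the my_* names are placeholders for illustration, not identifiers from gdrom.c:

/*
 * Minimal sketch of the pattern this commit adopts (assumed blk-mq API
 * of this era); "my_*" names are placeholders, not gdrom identifiers.
 */
#include <linux/blk-mq.h>

static struct blk_mq_tag_set my_tag_set;

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	/*
	 * With BLK_MQ_F_BLOCKING set on the tag set, this callback is
	 * invoked from process context and may sleep, which is what
	 * lets a driver drop its private deferred list and workqueue.
	 */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
};

static struct request_queue *my_alloc_queue(void)
{
	/* one hw queue, depth 1, merging allowed, blocking dispatch */
	return blk_mq_init_sq_queue(&my_tag_set, &my_mq_ops, 1,
			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
}

In the diff below, gdrom_queue_rq fills the my_queue_rq role, and probe_gdrom() makes the equivalent blk_mq_init_sq_queue() call with a queue depth of 1.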
Parent: a9f38e1

1 file changed, 79 insertions(+), 95 deletions(-)

drivers/cdrom/gdrom.c
@@ -31,12 +31,11 @@
 #include <linux/cdrom.h>
 #include <linux/genhd.h>
 #include <linux/bio.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/wait.h>
-#include <linux/workqueue.h>
 #include <linux/platform_device.h>
 #include <scsi/scsi.h>
 #include <asm/io.h>
@@ -102,11 +101,6 @@ static int gdrom_major;
 static DECLARE_WAIT_QUEUE_HEAD(command_queue);
 static DECLARE_WAIT_QUEUE_HEAD(request_queue);
 
-static DEFINE_SPINLOCK(gdrom_lock);
-static void gdrom_readdisk_dma(struct work_struct *work);
-static DECLARE_WORK(work, gdrom_readdisk_dma);
-static LIST_HEAD(gdrom_deferred);
-
 struct gdromtoc {
 	unsigned int entry[99];
 	unsigned int first, last;
@@ -122,6 +116,7 @@ static struct gdrom_unit {
 	char disk_type;
 	struct gdromtoc *toc;
 	struct request_queue *gdrom_rq;
+	struct blk_mq_tag_set tag_set;
 } gd;
 
 struct gdrom_id {
@@ -584,103 +579,83 @@ static int gdrom_set_interrupt_handlers(void)
  * 9 -> sectors >> 8
  * 10 -> sectors
  */
-static void gdrom_readdisk_dma(struct work_struct *work)
+static blk_status_t gdrom_readdisk_dma(struct request *req)
 {
 	int block, block_cnt;
 	blk_status_t err;
 	struct packet_command *read_command;
-	struct list_head *elem, *next;
-	struct request *req;
 	unsigned long timeout;
 
-	if (list_empty(&gdrom_deferred))
-		return;
 	read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
 	if (!read_command)
-		return; /* get more memory later? */
+		return BLK_STS_RESOURCE;
+
 	read_command->cmd[0] = 0x30;
 	read_command->cmd[1] = 0x20;
-	spin_lock(&gdrom_lock);
-	list_for_each_safe(elem, next, &gdrom_deferred) {
-		req = list_entry(elem, struct request, queuelist);
-		spin_unlock(&gdrom_lock);
-		block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
-		block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
-		__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
-		__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
-		__raw_writel(1, GDROM_DMA_DIRECTION_REG);
-		__raw_writel(1, GDROM_DMA_ENABLE_REG);
-		read_command->cmd[2] = (block >> 16) & 0xFF;
-		read_command->cmd[3] = (block >> 8) & 0xFF;
-		read_command->cmd[4] = block & 0xFF;
-		read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
-		read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
-		read_command->cmd[10] = block_cnt & 0xFF;
-		/* set for DMA */
-		__raw_writeb(1, GDROM_ERROR_REG);
-		/* other registers */
-		__raw_writeb(0, GDROM_SECNUM_REG);
-		__raw_writeb(0, GDROM_BCL_REG);
-		__raw_writeb(0, GDROM_BCH_REG);
-		__raw_writeb(0, GDROM_DSEL_REG);
-		__raw_writeb(0, GDROM_INTSEC_REG);
-		/* Wait for registers to reset after any previous activity */
-		timeout = jiffies + HZ / 2;
-		while (gdrom_is_busy() && time_before(jiffies, timeout))
-			cpu_relax();
-		__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
-		timeout = jiffies + HZ / 2;
-		/* Wait for packet command to finish */
-		while (gdrom_is_busy() && time_before(jiffies, timeout))
-			cpu_relax();
-		gd.pending = 1;
-		gd.transfer = 1;
-		outsw(GDROM_DATA_REG, &read_command->cmd, 6);
-		timeout = jiffies + HZ / 2;
-		/* Wait for any pending DMA to finish */
-		while (__raw_readb(GDROM_DMA_STATUS_REG) &&
-			time_before(jiffies, timeout))
-			cpu_relax();
-		/* start transfer */
-		__raw_writeb(1, GDROM_DMA_STATUS_REG);
-		wait_event_interruptible_timeout(request_queue,
-			gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-		err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
-		gd.transfer = 0;
-		gd.pending = 0;
-		/* now seek to take the request spinlock
-		 * before handling ending the request */
-		spin_lock(&gdrom_lock);
-		list_del_init(&req->queuelist);
-		__blk_end_request_all(req, err);
-	}
-	spin_unlock(&gdrom_lock);
+	block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+	block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
+	__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
+	__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
+	__raw_writel(1, GDROM_DMA_DIRECTION_REG);
+	__raw_writel(1, GDROM_DMA_ENABLE_REG);
+	read_command->cmd[2] = (block >> 16) & 0xFF;
+	read_command->cmd[3] = (block >> 8) & 0xFF;
+	read_command->cmd[4] = block & 0xFF;
+	read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
+	read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
+	read_command->cmd[10] = block_cnt & 0xFF;
+	/* set for DMA */
+	__raw_writeb(1, GDROM_ERROR_REG);
+	/* other registers */
+	__raw_writeb(0, GDROM_SECNUM_REG);
+	__raw_writeb(0, GDROM_BCL_REG);
+	__raw_writeb(0, GDROM_BCH_REG);
+	__raw_writeb(0, GDROM_DSEL_REG);
+	__raw_writeb(0, GDROM_INTSEC_REG);
+	/* Wait for registers to reset after any previous activity */
+	timeout = jiffies + HZ / 2;
+	while (gdrom_is_busy() && time_before(jiffies, timeout))
+		cpu_relax();
+	__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+	timeout = jiffies + HZ / 2;
+	/* Wait for packet command to finish */
+	while (gdrom_is_busy() && time_before(jiffies, timeout))
+		cpu_relax();
+	gd.pending = 1;
+	gd.transfer = 1;
+	outsw(GDROM_DATA_REG, &read_command->cmd, 6);
+	timeout = jiffies + HZ / 2;
+	/* Wait for any pending DMA to finish */
+	while (__raw_readb(GDROM_DMA_STATUS_REG) &&
+		time_before(jiffies, timeout))
+		cpu_relax();
+	/* start transfer */
+	__raw_writeb(1, GDROM_DMA_STATUS_REG);
+	wait_event_interruptible_timeout(request_queue,
+		gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
+	err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
+	gd.transfer = 0;
+	gd.pending = 0;
+
+	blk_mq_end_request(req, err);
 	kfree(read_command);
+	return BLK_STS_OK;
 }
 
-static void gdrom_request(struct request_queue *rq)
-{
-	struct request *req;
-
-	while ((req = blk_fetch_request(rq)) != NULL) {
-		switch (req_op(req)) {
-		case REQ_OP_READ:
-			/*
-			 * Add to list of deferred work and then schedule
-			 * workqueue.
-			 */
-			list_add_tail(&req->queuelist, &gdrom_deferred);
-			schedule_work(&work);
-			break;
-		case REQ_OP_WRITE:
-			pr_notice("Read only device - write request ignored\n");
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			break;
-		default:
-			printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			break;
-		}
+static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
+				   const struct blk_mq_queue_data *bd)
+{
+	blk_mq_start_request(bd->rq);
+
+	switch (req_op(bd->rq)) {
+	case REQ_OP_READ:
+		return gdrom_readdisk_dma(bd->rq);
+	case REQ_OP_WRITE:
+		pr_notice("Read only device - write request ignored\n");
+		return BLK_STS_IOERR;
+	default:
+		printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -768,6 +743,10 @@ static int probe_gdrom_setupqueue(void)
 	return gdrom_init_dma_mode();
 }
 
+static const struct blk_mq_ops gdrom_mq_ops = {
+	.queue_rq	= gdrom_queue_rq,
+};
+
 /*
  * register this as a block device and as compliant with the
  * universal CD Rom driver interface
@@ -811,11 +790,15 @@ static int probe_gdrom(struct platform_device *devptr)
 	err = gdrom_set_interrupt_handlers();
 	if (err)
 		goto probe_fail_cmdirq_register;
-	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
-	if (!gd.gdrom_rq) {
-		err = -ENOMEM;
+
+	gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
+				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+	if (IS_ERR(gd.gdrom_rq)) {
+		err = PTR_ERR(gd.gdrom_rq);
+		gd.gdrom_rq = NULL;
 		goto probe_fail_requestq;
 	}
+
 	blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
 
 	err = probe_gdrom_setupqueue();
@@ -832,6 +815,7 @@ static int probe_gdrom(struct platform_device *devptr)
 
 probe_fail_toc:
 	blk_cleanup_queue(gd.gdrom_rq);
+	blk_mq_free_tag_set(&gd.tag_set);
 probe_fail_requestq:
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -849,8 +833,8 @@ static int probe_gdrom(struct platform_device *devptr)
 
 static int remove_gdrom(struct platform_device *devptr)
 {
-	flush_work(&work);
 	blk_cleanup_queue(gd.gdrom_rq);
+	blk_mq_free_tag_set(&gd.tag_set);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
 	del_gendisk(gd.disk);
