Skip to content

Commit e7278a8

Browse files
KAGA-KOKO authored and axboe committed
skd: Coalesce struct request and struct skd_request_context
Set request_queue.cmd_size, introduce skd_init_rq() and skd_exit_rq(), and remove skd_device.skreq_table.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 5d00324 commit e7278a8

File tree

1 file changed

+54
-120
lines changed

1 file changed

+54
-120
lines changed

drivers/block/skd_main.c

Lines changed: 54 additions & 120 deletions
Original file line number | Diff line number | Diff line change
@@ -183,7 +183,6 @@ struct skd_request_context {
183183
u16 id;
184184
u32 fitmsg_id;
185185

186-
struct request *req;
187186
u8 flush_cmd;
188187

189188
u32 timeout_stamp;
@@ -256,8 +255,6 @@ struct skd_device {
256255
atomic_t timeout_stamp;
257256
struct skd_fitmsg_context *skmsg_table;
258257

259-
struct skd_request_context *skreq_table;
260-
261258
struct skd_special_context internal_skspcl;
262259
u32 read_cap_blocksize;
263260
u32 read_cap_last_lba;
@@ -500,7 +497,7 @@ static void skd_process_request(struct request *req)
500497
struct skd_fitmsg_context *skmsg;
501498
struct fit_msg_hdr *fmh;
502499
const u32 tag = blk_mq_unique_tag(req);
503-
struct skd_request_context *const skreq = &skdev->skreq_table[tag];
500+
struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
504501
struct skd_scsi_request *scsi_req;
505502
unsigned long io_flags;
506503
u32 lba;
@@ -537,14 +534,14 @@ static void skd_process_request(struct request *req)
537534
skreq->n_sg = 0;
538535
skreq->sg_byte_count = 0;
539536

540-
skreq->req = req;
541537
skreq->fitmsg_id = 0;
542538

543539
skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
544540

545541
if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
546542
dev_dbg(&skdev->pdev->dev, "error Out\n");
547-
skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
543+
skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
544+
BLK_STS_RESOURCE);
548545
return;
549546
}
550547

@@ -705,7 +702,7 @@ static void skd_end_request(struct skd_device *skdev, struct request *req,
705702
static bool skd_preop_sg_list(struct skd_device *skdev,
706703
struct skd_request_context *skreq)
707704
{
708-
struct request *req = skreq->req;
705+
struct request *req = blk_mq_rq_from_pdu(skreq);
709706
struct scatterlist *sgl = &skreq->sg[0], *sg;
710707
int n_sg;
711708
int i;
@@ -1563,32 +1560,13 @@ static void skd_release_skreq(struct skd_device *skdev,
15631560
SKD_ASSERT(atomic_read(&skdev->timeout_slot[timo_slot]) > 0);
15641561
atomic_dec(&skdev->timeout_slot[timo_slot]);
15651562

1566-
/*
1567-
* Reset backpointer
1568-
*/
1569-
skreq->req = NULL;
1570-
15711563
/*
15721564
* Reclaim the skd_request_context
15731565
*/
15741566
skreq->state = SKD_REQ_STATE_IDLE;
15751567
skreq->id += SKD_ID_INCR;
15761568
}
15771569

1578-
static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
1579-
struct request *rq)
1580-
{
1581-
struct skd_request_context *skreq;
1582-
int i;
1583-
1584-
for (i = 0, skreq = skdev->skreq_table; i < skdev->num_fitmsg_context;
1585-
i++, skreq++)
1586-
if (skreq->req == rq)
1587-
return skreq;
1588-
1589-
return NULL;
1590-
}
1591-
15921570
static int skd_isr_completion_posted(struct skd_device *skdev,
15931571
int limit, int *enqueued)
15941572
{
@@ -1661,7 +1639,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
16611639
if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
16621640
tag))
16631641
continue;
1664-
skreq = skd_skreq_from_rq(skdev, rq);
1642+
skreq = blk_mq_rq_to_pdu(rq);
16651643

16661644
/*
16671645
* Make sure the request ID for the slot matches.
@@ -2034,7 +2012,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
20342012
static void skd_recover_request(struct skd_device *skdev,
20352013
struct skd_request_context *skreq)
20362014
{
2037-
struct request *req = skreq->req;
2015+
struct request *req = blk_mq_rq_from_pdu(skreq);
20382016

20392017
if (skreq->state != SKD_REQ_STATE_BUSY)
20402018
return;
@@ -2047,7 +2025,6 @@ static void skd_recover_request(struct skd_device *skdev,
20472025
if (skreq->n_sg > 0)
20482026
skd_postop_sg_list(skdev, skreq);
20492027

2050-
skreq->req = NULL;
20512028
skreq->state = SKD_REQ_STATE_IDLE;
20522029

20532030
skd_end_request(skdev, req, BLK_STS_IOERR);
@@ -2058,8 +2035,12 @@ static void skd_recover_requests(struct skd_device *skdev)
20582035
int i;
20592036

20602037
for (i = 0; i < skdev->num_req_context; i++) {
2061-
struct skd_request_context *skreq = &skdev->skreq_table[i];
2038+
struct request *rq = blk_map_queue_find_tag(skdev->queue->
2039+
queue_tags, i);
2040+
struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
20622041

2042+
if (!rq)
2043+
continue;
20632044
skd_recover_request(skdev, skreq);
20642045
}
20652046

@@ -2862,53 +2843,28 @@ static void skd_free_sg_list(struct skd_device *skdev,
28622843
pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
28632844
}
28642845

2865-
static int skd_cons_skreq(struct skd_device *skdev)
2846+
static int skd_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
28662847
{
2867-
int rc = 0;
2868-
u32 i;
2869-
2870-
dev_dbg(&skdev->pdev->dev,
2871-
"skreq_table kcalloc, struct %lu, count %u total %lu\n",
2872-
sizeof(struct skd_request_context), skdev->num_req_context,
2873-
sizeof(struct skd_request_context) * skdev->num_req_context);
2874-
2875-
skdev->skreq_table = kcalloc(skdev->num_req_context,
2876-
sizeof(struct skd_request_context),
2877-
GFP_KERNEL);
2878-
if (skdev->skreq_table == NULL) {
2879-
rc = -ENOMEM;
2880-
goto err_out;
2881-
}
2882-
2883-
dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
2884-
skdev->sgs_per_request, sizeof(struct scatterlist),
2885-
skdev->sgs_per_request * sizeof(struct scatterlist));
2886-
2887-
for (i = 0; i < skdev->num_req_context; i++) {
2888-
struct skd_request_context *skreq;
2848+
struct skd_device *skdev = q->queuedata;
2849+
struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
28892850

2890-
skreq = &skdev->skreq_table[i];
2891-
skreq->state = SKD_REQ_STATE_IDLE;
2892-
skreq->sg = kcalloc(skdev->sgs_per_request,
2893-
sizeof(struct scatterlist), GFP_KERNEL);
2894-
if (skreq->sg == NULL) {
2895-
rc = -ENOMEM;
2896-
goto err_out;
2897-
}
2898-
sg_init_table(skreq->sg, skdev->sgs_per_request);
2851+
skreq->state = SKD_REQ_STATE_IDLE;
2852+
skreq->sg = (void *)(skreq + 1);
2853+
sg_init_table(skreq->sg, skd_sgs_per_request);
2854+
skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
2855+
&skreq->sksg_dma_address);
28992856

2900-
skreq->sksg_list = skd_cons_sg_list(skdev,
2901-
skdev->sgs_per_request,
2902-
&skreq->sksg_dma_address);
2857+
return skreq->sksg_list ? 0 : -ENOMEM;
2858+
}
29032859

2904-
if (skreq->sksg_list == NULL) {
2905-
rc = -ENOMEM;
2906-
goto err_out;
2907-
}
2908-
}
2860+
static void skd_exit_rq(struct request_queue *q, struct request *rq)
2861+
{
2862+
struct skd_device *skdev = q->queuedata;
2863+
struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
29092864

2910-
err_out:
2911-
return rc;
2865+
skd_free_sg_list(skdev, skreq->sksg_list,
2866+
skdev->sgs_per_request,
2867+
skreq->sksg_dma_address);
29122868
}
29132869

29142870
static int skd_cons_sksb(struct skd_device *skdev)
@@ -2976,18 +2932,30 @@ static int skd_cons_disk(struct skd_device *skdev)
29762932
disk->fops = &skd_blockdev_ops;
29772933
disk->private_data = skdev;
29782934

2979-
q = blk_init_queue(skd_request_fn, &skdev->lock);
2935+
q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
29802936
if (!q) {
29812937
rc = -ENOMEM;
29822938
goto err_out;
29832939
}
29842940
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2941+
q->queuedata = skdev;
2942+
q->request_fn = skd_request_fn;
2943+
q->queue_lock = &skdev->lock;
29852944
q->nr_requests = skd_max_queue_depth / 2;
2986-
blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);
2945+
q->cmd_size = sizeof(struct skd_request_context) +
2946+
skdev->sgs_per_request * sizeof(struct scatterlist);
2947+
q->init_rq_fn = skd_init_rq;
2948+
q->exit_rq_fn = skd_exit_rq;
2949+
rc = blk_init_allocated_queue(q);
2950+
if (rc < 0)
2951+
goto cleanup_q;
2952+
rc = blk_queue_init_tags(q, skd_max_queue_depth, NULL,
2953+
BLK_TAG_ALLOC_FIFO);
2954+
if (rc < 0)
2955+
goto cleanup_q;
29872956

29882957
skdev->queue = q;
29892958
disk->queue = q;
2990-
q->queuedata = skdev;
29912959

29922960
blk_queue_write_cache(q, true, true);
29932961
blk_queue_max_segments(q, skdev->sgs_per_request);
@@ -3006,6 +2974,10 @@ static int skd_cons_disk(struct skd_device *skdev)
30062974

30072975
err_out:
30082976
return rc;
2977+
2978+
cleanup_q:
2979+
blk_cleanup_queue(q);
2980+
goto err_out;
30092981
}
30102982

30112983
#define SKD_N_DEV_TABLE 16u
@@ -3052,11 +3024,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
30523024
if (rc < 0)
30533025
goto err_out;
30543026

3055-
dev_dbg(&skdev->pdev->dev, "skreq\n");
3056-
rc = skd_cons_skreq(skdev);
3057-
if (rc < 0)
3058-
goto err_out;
3059-
30603027
dev_dbg(&skdev->pdev->dev, "sksb\n");
30613028
rc = skd_cons_sksb(skdev);
30623029
if (rc < 0)
@@ -3117,32 +3084,6 @@ static void skd_free_skmsg(struct skd_device *skdev)
31173084
skdev->skmsg_table = NULL;
31183085
}
31193086

3120-
static void skd_free_skreq(struct skd_device *skdev)
3121-
{
3122-
u32 i;
3123-
3124-
if (skdev->skreq_table == NULL)
3125-
return;
3126-
3127-
for (i = 0; i < skdev->num_req_context; i++) {
3128-
struct skd_request_context *skreq;
3129-
3130-
skreq = &skdev->skreq_table[i];
3131-
3132-
skd_free_sg_list(skdev, skreq->sksg_list,
3133-
skdev->sgs_per_request,
3134-
skreq->sksg_dma_address);
3135-
3136-
skreq->sksg_list = NULL;
3137-
skreq->sksg_dma_address = 0;
3138-
3139-
kfree(skreq->sg);
3140-
}
3141-
3142-
kfree(skdev->skreq_table);
3143-
skdev->skreq_table = NULL;
3144-
}
3145-
31463087
static void skd_free_sksb(struct skd_device *skdev)
31473088
{
31483089
struct skd_special_context *skspcl;
@@ -3204,9 +3145,6 @@ static void skd_destruct(struct skd_device *skdev)
32043145
dev_dbg(&skdev->pdev->dev, "sksb\n");
32053146
skd_free_sksb(skdev);
32063147

3207-
dev_dbg(&skdev->pdev->dev, "skreq\n");
3208-
skd_free_skreq(skdev);
3209-
32103148
dev_dbg(&skdev->pdev->dev, "skmsg\n");
32113149
skd_free_skmsg(skdev);
32123150

@@ -3734,23 +3672,19 @@ static void skd_log_skdev(struct skd_device *skdev, const char *event)
37343672
static void skd_log_skreq(struct skd_device *skdev,
37353673
struct skd_request_context *skreq, const char *event)
37363674
{
3675+
struct request *req = blk_mq_rq_from_pdu(skreq);
3676+
u32 lba = blk_rq_pos(req);
3677+
u32 count = blk_rq_sectors(req);
3678+
37373679
dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
37383680
dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
37393681
skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
37403682
skreq->fitmsg_id);
37413683
dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
37423684
skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);
3743-
3744-
if (skreq->req != NULL) {
3745-
struct request *req = skreq->req;
3746-
u32 lba = (u32)blk_rq_pos(req);
3747-
u32 count = blk_rq_sectors(req);
3748-
3749-
dev_dbg(&skdev->pdev->dev,
3750-
"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
3751-
lba, lba, count, count, (int)rq_data_dir(req));
3752-
} else
3753-
dev_dbg(&skdev->pdev->dev, "req=NULL\n");
3685+
dev_dbg(&skdev->pdev->dev,
3686+
"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
3687+
count, count, (int)rq_data_dir(req));
37543688
}
37553689

37563690
/*

0 commit comments

Comments
 (0)