@@ -183,7 +183,6 @@ struct skd_request_context {
 	u16 id;
 	u32 fitmsg_id;
 
-	struct request *req;
 	u8 flush_cmd;
 
 	u32 timeout_stamp;
@@ -256,8 +255,6 @@ struct skd_device {
 	atomic_t timeout_stamp;
 	struct skd_fitmsg_context *skmsg_table;
 
-	struct skd_request_context *skreq_table;
-
 	struct skd_special_context internal_skspcl;
 	u32 read_cap_blocksize;
 	u32 read_cap_last_lba;
@@ -500,7 +497,7 @@ static void skd_process_request(struct request *req)
 	struct skd_fitmsg_context *skmsg;
 	struct fit_msg_hdr *fmh;
 	const u32 tag = blk_mq_unique_tag(req);
-	struct skd_request_context *const skreq = &skdev->skreq_table[tag];
+	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
 	struct skd_scsi_request *scsi_req;
 	unsigned long io_flags;
 	u32 lba;
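
With the per-request PDU scheme the tag-indexed lookup table disappears: the block layer allocates the skd_request_context immediately behind every struct request, so converting between the two is pure pointer arithmetic in both directions. For orientation (not part of this patch), the stock block-layer helpers are essentially:

	static inline void *blk_mq_rq_to_pdu(struct request *rq)
	{
		return rq + 1;	/* PDU starts directly behind the request */
	}

	static inline struct request *blk_mq_rq_from_pdu(void *pdu)
	{
		return pdu - sizeof(struct request);
	}

This is also what makes the skreq->req backpointer removable: a context can always recover its request via blk_mq_rq_from_pdu(), as the following hunks show.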
@@ -537,14 +534,14 @@ static void skd_process_request(struct request *req)
 	skreq->n_sg = 0;
 	skreq->sg_byte_count = 0;
 
-	skreq->req = req;
 	skreq->fitmsg_id = 0;
 
 	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
 	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
 		dev_dbg(&skdev->pdev->dev, "error Out\n");
-		skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
+		skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
+				BLK_STS_RESOURCE);
 		return;
 	}
 
@@ -705,7 +702,7 @@ static void skd_end_request(struct skd_device *skdev, struct request *req,
 static bool skd_preop_sg_list(struct skd_device *skdev,
 			      struct skd_request_context *skreq)
 {
-	struct request *req = skreq->req;
+	struct request *req = blk_mq_rq_from_pdu(skreq);
 	struct scatterlist *sgl = &skreq->sg[0], *sg;
 	int n_sg;
 	int i;
@@ -1563,32 +1560,13 @@ static void skd_release_skreq(struct skd_device *skdev,
 	SKD_ASSERT(atomic_read(&skdev->timeout_slot[timo_slot]) > 0);
 	atomic_dec(&skdev->timeout_slot[timo_slot]);
 
-	/*
-	 * Reset backpointer
-	 */
-	skreq->req = NULL;
-
 	/*
 	 * Reclaim the skd_request_context
 	 */
 	skreq->state = SKD_REQ_STATE_IDLE;
 	skreq->id += SKD_ID_INCR;
 }
 
-static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
-						     struct request *rq)
-{
-	struct skd_request_context *skreq;
-	int i;
-
-	for (i = 0, skreq = skdev->skreq_table; i < skdev->num_fitmsg_context;
-	     i++, skreq++)
-		if (skreq->req == rq)
-			return skreq;
-
-	return NULL;
-}
-
 static int skd_isr_completion_posted(struct skd_device *skdev,
 				     int limit, int *enqueued)
 {
@@ -1661,7 +1639,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
 			 tag))
 			continue;
-		skreq = skd_skreq_from_rq(skdev, rq);
+		skreq = blk_mq_rq_to_pdu(rq);
 
 		/*
 		 * Make sure the request ID for the slot matches.
@@ -2034,7 +2012,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
 static void skd_recover_request(struct skd_device *skdev,
 				struct skd_request_context *skreq)
 {
-	struct request *req = skreq->req;
+	struct request *req = blk_mq_rq_from_pdu(skreq);
 
 	if (skreq->state != SKD_REQ_STATE_BUSY)
 		return;
@@ -2047,7 +2025,6 @@ static void skd_recover_request(struct skd_device *skdev,
 	if (skreq->n_sg > 0)
 		skd_postop_sg_list(skdev, skreq);
 
-	skreq->req = NULL;
 	skreq->state = SKD_REQ_STATE_IDLE;
 
 	skd_end_request(skdev, req, BLK_STS_IOERR);
@@ -2058,8 +2035,12 @@ static void skd_recover_requests(struct skd_device *skdev)
 	int i;
 
 	for (i = 0; i < skdev->num_req_context; i++) {
-		struct skd_request_context *skreq = &skdev->skreq_table[i];
+		struct request *rq = blk_map_queue_find_tag(skdev->queue->
+							    queue_tags, i);
+		struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
 
+		if (!rq)
+			continue;
 		skd_recover_request(skdev, skreq);
 	}
 
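
With no private skreq_table left to walk, request recovery iterates the legacy tag map instead: blk_map_queue_find_tag() returns the request currently bound to a tag, or NULL for a free slot. Computing blk_mq_rq_to_pdu(rq) before the NULL check is harmless because that helper only does pointer arithmetic and never dereferences its argument. The same iteration as a reusable sketch (skd_for_each_busy_skreq is a hypothetical name, not in the driver):

	/* Hypothetical helper: visit the skd_request_context of every
	 * request that currently owns a slot in the legacy tag map. */
	static void skd_for_each_busy_skreq(struct skd_device *skdev,
					    void (*fn)(struct skd_device *,
						       struct skd_request_context *))
	{
		int i;

		for (i = 0; i < skdev->num_req_context; i++) {
			struct request *rq =
				blk_map_queue_find_tag(skdev->queue->queue_tags, i);

			if (!rq)	/* tag slot is free */
				continue;
			fn(skdev, blk_mq_rq_to_pdu(rq));
		}
	}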
@@ -2862,53 +2843,28 @@ static void skd_free_sg_list(struct skd_device *skdev,
 	pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
 }
 
-static int skd_cons_skreq(struct skd_device *skdev)
+static int skd_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
 {
-	int rc = 0;
-	u32 i;
-
-	dev_dbg(&skdev->pdev->dev,
-		"skreq_table kcalloc, struct %lu, count %u total %lu\n",
-		sizeof(struct skd_request_context), skdev->num_req_context,
-		sizeof(struct skd_request_context) * skdev->num_req_context);
-
-	skdev->skreq_table = kcalloc(skdev->num_req_context,
-				     sizeof(struct skd_request_context),
-				     GFP_KERNEL);
-	if (skdev->skreq_table == NULL) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-
-	dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
-		skdev->sgs_per_request, sizeof(struct scatterlist),
-		skdev->sgs_per_request * sizeof(struct scatterlist));
-
-	for (i = 0; i < skdev->num_req_context; i++) {
-		struct skd_request_context *skreq;
+	struct skd_device *skdev = q->queuedata;
+	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
 
-		skreq = &skdev->skreq_table[i];
-		skreq->state = SKD_REQ_STATE_IDLE;
-		skreq->sg = kcalloc(skdev->sgs_per_request,
-				    sizeof(struct scatterlist), GFP_KERNEL);
-		if (skreq->sg == NULL) {
-			rc = -ENOMEM;
-			goto err_out;
-		}
-		sg_init_table(skreq->sg, skdev->sgs_per_request);
+	skreq->state = SKD_REQ_STATE_IDLE;
+	skreq->sg = (void *)(skreq + 1);
+	sg_init_table(skreq->sg, skd_sgs_per_request);
+	skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
+					    &skreq->sksg_dma_address);
 
-		skreq->sksg_list = skd_cons_sg_list(skdev,
-						    skdev->sgs_per_request,
-						    &skreq->sksg_dma_address);
+	return skreq->sksg_list ? 0 : -ENOMEM;
+}
 
-		if (skreq->sksg_list == NULL) {
-			rc = -ENOMEM;
-			goto err_out;
-		}
-	}
+static void skd_exit_rq(struct request_queue *q, struct request *rq)
+{
+	struct skd_device *skdev = q->queuedata;
+	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
 
-err_out:
-	return rc;
+	skd_free_sg_list(skdev, skreq->sksg_list,
+			 skdev->sgs_per_request,
+			 skreq->sksg_dma_address);
 }
 
 static int skd_cons_sksb(struct skd_device *skdev)
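
skd_init_rq() and skd_exit_rq() run once per request as the block layer builds and tears down its request pool, replacing the old constructor/destructor loops. The assignment skreq->sg = (void *)(skreq + 1) works only because cmd_size (set in skd_cons_disk() below) reserves room for the scatterlist array behind the context, yielding one allocation per request:

	/* Per-request allocation layout once q->cmd_size is set:
	 *
	 *   struct request                       (owned by the block layer)
	 *   struct skd_request_context           blk_mq_rq_to_pdu(rq)
	 *   struct scatterlist[sgs_per_request]  (void *)(skreq + 1)
	 *
	 * Only sksg_list still needs a separate DMA-coherent allocation.
	 * skd_cmd_size() is a hypothetical name for the size computation
	 * that skd_cons_disk() performs inline.
	 */
	static unsigned int skd_cmd_size(const struct skd_device *skdev)
	{
		return sizeof(struct skd_request_context) +
		       skdev->sgs_per_request * sizeof(struct scatterlist);
	}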
@@ -2976,18 +2932,30 @@ static int skd_cons_disk(struct skd_device *skdev)
 	disk->fops = &skd_blockdev_ops;
 	disk->private_data = skdev;
 
-	q = blk_init_queue(skd_request_fn, &skdev->lock);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q) {
 		rc = -ENOMEM;
 		goto err_out;
 	}
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+	q->queuedata = skdev;
+	q->request_fn = skd_request_fn;
+	q->queue_lock = &skdev->lock;
 	q->nr_requests = skd_max_queue_depth / 2;
-	blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);
+	q->cmd_size = sizeof(struct skd_request_context) +
+		skdev->sgs_per_request * sizeof(struct scatterlist);
+	q->init_rq_fn = skd_init_rq;
+	q->exit_rq_fn = skd_exit_rq;
+	rc = blk_init_allocated_queue(q);
+	if (rc < 0)
+		goto cleanup_q;
+	rc = blk_queue_init_tags(q, skd_max_queue_depth, NULL,
+				 BLK_TAG_ALLOC_FIFO);
+	if (rc < 0)
+		goto cleanup_q;
 
 	skdev->queue = q;
 	disk->queue = q;
-	q->queuedata = skdev;
 
 	blk_queue_write_cache(q, true, true);
 	blk_queue_max_segments(q, skdev->sgs_per_request);
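
blk_init_queue() allocates and fully initializes a queue in one call, leaving no point at which cmd_size or the init_rq_fn/exit_rq_fn hooks could be set before the request pool is built. Hence the switch to the two-step form: blk_alloc_queue_node() only allocates, the driver fills in the per-request parameters, and blk_init_allocated_queue() finishes setup and allocates the requests together with their PDUs. The sequence reduced to its essentials (error paths elided, names as in this driver):

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	q->request_fn = skd_request_fn;		/* legacy request handler */
	q->cmd_size = sizeof(struct skd_request_context) +
		      skdev->sgs_per_request * sizeof(struct scatterlist);
	q->init_rq_fn = skd_init_rq;	/* runs once per preallocated request */
	q->exit_rq_fn = skd_exit_rq;	/* inverse, at queue teardown */
	rc = blk_init_allocated_queue(q);	/* builds the request pool */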
@@ -3006,6 +2974,10 @@ static int skd_cons_disk(struct skd_device *skdev)
 
 err_out:
 	return rc;
+
+cleanup_q:
+	blk_cleanup_queue(q);
+	goto err_out;
 }
 
 #define SKD_N_DEV_TABLE 16u
@@ -3052,11 +3024,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
 	if (rc < 0)
 		goto err_out;
 
-	dev_dbg(&skdev->pdev->dev, "skreq\n");
-	rc = skd_cons_skreq(skdev);
-	if (rc < 0)
-		goto err_out;
-
 	dev_dbg(&skdev->pdev->dev, "sksb\n");
 	rc = skd_cons_sksb(skdev);
 	if (rc < 0)
@@ -3117,32 +3084,6 @@ static void skd_free_skmsg(struct skd_device *skdev)
 	skdev->skmsg_table = NULL;
 }
 
-static void skd_free_skreq(struct skd_device *skdev)
-{
-	u32 i;
-
-	if (skdev->skreq_table == NULL)
-		return;
-
-	for (i = 0; i < skdev->num_req_context; i++) {
-		struct skd_request_context *skreq;
-
-		skreq = &skdev->skreq_table[i];
-
-		skd_free_sg_list(skdev, skreq->sksg_list,
-				 skdev->sgs_per_request,
-				 skreq->sksg_dma_address);
-
-		skreq->sksg_list = NULL;
-		skreq->sksg_dma_address = 0;
-
-		kfree(skreq->sg);
-	}
-
-	kfree(skdev->skreq_table);
-	skdev->skreq_table = NULL;
-}
-
 static void skd_free_sksb(struct skd_device *skdev)
 {
 	struct skd_special_context *skspcl;
@@ -3204,9 +3145,6 @@ static void skd_destruct(struct skd_device *skdev)
 	dev_dbg(&skdev->pdev->dev, "sksb\n");
 	skd_free_sksb(skdev);
 
-	dev_dbg(&skdev->pdev->dev, "skreq\n");
-	skd_free_skreq(skdev);
-
 	dev_dbg(&skdev->pdev->dev, "skmsg\n");
 	skd_free_skmsg(skdev);
 
@@ -3734,23 +3672,19 @@ static void skd_log_skdev(struct skd_device *skdev, const char *event)
 static void skd_log_skreq(struct skd_device *skdev,
 			  struct skd_request_context *skreq, const char *event)
 {
+	struct request *req = blk_mq_rq_from_pdu(skreq);
+	u32 lba = blk_rq_pos(req);
+	u32 count = blk_rq_sectors(req);
+
 	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
 	dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
 		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
 		skreq->fitmsg_id);
 	dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
 		skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);
-
-	if (skreq->req != NULL) {
-		struct request *req = skreq->req;
-		u32 lba = (u32)blk_rq_pos(req);
-		u32 count = blk_rq_sectors(req);
-
-		dev_dbg(&skdev->pdev->dev,
-			"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
-			lba, lba, count, count, (int)rq_data_dir(req));
-	} else
-		dev_dbg(&skdev->pdev->dev, "req=NULL\n");
+	dev_dbg(&skdev->pdev->dev,
+		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
+		count, count, (int)rq_data_dir(req));
 }
 
 /*