@@ -821,8 +821,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
 	dma_addr_t dma_addr;
 	int i, ret = -ENOMEM;
 
-	INIT_LIST_HEAD(&ch->free_reqs);
-
 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
 			       GFP_KERNEL);
 	if (!ch->req_ring)
@@ -853,8 +851,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
 			goto out;
 
 		req->indirect_dma_addr = dma_addr;
-		req->index = i;
-		list_add_tail(&req->list, &ch->free_reqs);
 	}
 	ret = 0;
 
@@ -1076,7 +1072,6 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
 
 	spin_lock_irqsave(&ch->lock, flags);
 	ch->req_lim += req_lim_delta;
-	list_add_tail(&req->list, &ch->free_reqs);
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
 
@@ -1648,8 +1643,11 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
 			ch->tsk_mgmt_status = rsp->data[3];
 		complete(&ch->tsk_mgmt_done);
 	} else {
-		req = &ch->req_ring[rsp->tag];
-		scmnd = srp_claim_req(ch, req, NULL, NULL);
+		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+		if (scmnd) {
+			req = (void *)scmnd->host_scribble;
+			scmnd = srp_claim_req(ch, req, NULL, scmnd);
+		}
 		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
@@ -1889,6 +1887,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
 	unsigned long flags;
+	u32 tag;
+	u16 idx;
 	int len, ret;
 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
 
@@ -1905,17 +1905,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	if (unlikely(scmnd->result))
 		goto err;
 
+	WARN_ON_ONCE(scmnd->request->tag < 0);
+	tag = blk_mq_unique_tag(scmnd->request);
 	ch = &target->ch;
+	idx = blk_mq_unique_tag_to_tag(tag);
+	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
+		  dev_name(&shost->shost_gendev), tag, idx,
+		  target->req_ring_size);
 
 	spin_lock_irqsave(&ch->lock, flags);
 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
-	if (!iu)
-		goto err_unlock;
-
-	req = list_first_entry(&ch->free_reqs, struct srp_request, list);
-	list_del(&req->list);
 	spin_unlock_irqrestore(&ch->lock, flags);
 
+	if (!iu)
+		goto err;
+
+	req = &ch->req_ring[idx];
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
 				   DMA_TO_DEVICE);
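
Aside, not part of the patch: the hunk above relies on the blk-mq convention that a "unique tag" packs the hardware queue number into the upper bits and the per-queue tag into the low 16 bits, which is why blk_mq_unique_tag_to_tag() alone yields an index usable for ch->req_ring and why the WARN_ONCE compares that index against req_ring_size. A stand-alone, user-space sketch of that layout follows; the sketch_* names and the exact bit width are assumptions made for illustration, not kernel code.

/* Illustration of a "unique tag" split into (hardware queue, per-queue tag). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_UNIQUE_TAG_BITS 16
#define SKETCH_UNIQUE_TAG_MASK ((1u << SKETCH_UNIQUE_TAG_BITS) - 1)

/* Pack a hardware queue number and a per-queue tag into one 32-bit value. */
static uint32_t sketch_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << SKETCH_UNIQUE_TAG_BITS) | tag;
}

/* Recover the per-queue tag, i.e. the request ring index. */
static uint16_t sketch_unique_tag_to_tag(uint32_t unique_tag)
{
	return unique_tag & SKETCH_UNIQUE_TAG_MASK;
}

int main(void)
{
	uint32_t utag = sketch_unique_tag(2, 37);	/* hwq 2, tag 37 */

	assert(sketch_unique_tag_to_tag(utag) == 37);
	printf("unique tag 0x%x -> ring index %u\n",
	       (unsigned int)utag,
	       (unsigned int)sketch_unique_tag_to_tag(utag));
	return 0;
}
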
@@ -1927,7 +1932,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 
 	cmd->opcode = SRP_CMD;
 	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
-	cmd->tag    = req->index;
+	cmd->tag    = tag;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
 
 	req->scmnd    = scmnd;
@@ -1976,12 +1981,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	 */
 	req->scmnd = NULL;
 
-	spin_lock_irqsave(&ch->lock, flags);
-	list_add(&req->list, &ch->free_reqs);
-
-err_unlock:
-	spin_unlock_irqrestore(&ch->lock, flags);
-
 err:
 	if (scmnd->result) {
 		scmnd->scsi_done(scmnd);
@@ -2387,6 +2386,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+	u32 tag;
 	struct srp_rdma_ch *ch;
 	int ret;
 
@@ -2395,7 +2395,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 	ch = &target->ch;
 	if (!req || !srp_claim_req(ch, req, NULL, scmnd))
 		return SUCCESS;
-	if (srp_send_tsk_mgmt(ch, req->index, scmnd->device->lun,
+	tag = blk_mq_unique_tag(scmnd->request);
+	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
 			      SRP_TSK_ABORT_TASK) == 0)
 		ret = SUCCESS;
 	else if (target->rport->state == SRP_RPORT_LOST)
@@ -2633,7 +2634,8 @@ static struct scsi_host_template srp_template = {
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.shost_attrs			= srp_host_attrs
+	.shost_attrs			= srp_host_attrs,
+	.use_blk_tags			= 1,
 };
 
 static int srp_sdev_count(struct Scsi_Host *host)
@@ -3054,6 +3056,10 @@ static ssize_t srp_create_target(struct device *dev,
 	if (ret)
 		goto err;
 
+	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
+	if (ret)
+		goto err;
+
 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
 
 	if (!srp_conn_unique(target->srp_host, target)) {
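
Aside, not part of the patch: taken together, the .use_blk_tags template flag and the scsi_init_shared_tag_map() call above are what guarantee that every scsi_cmnd reaches srp_queuecommand() with a valid block layer tag drawn from a single host-wide map sized to can_queue. A hedged sketch of that setup ordering for a hypothetical low-level driver is shown below; everything named sketch_* is invented for illustration, and only the scsi_* mid-layer calls are assumed real for this kernel generation.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

/* Hypothetical queuecommand: with .use_blk_tags set, each command is
 * expected to carry a valid request->tag, so blk_mq_unique_tag() can be
 * used as the wire tag, as in srp_queuecommand() above. */
static int sketch_queuecommand(struct Scsi_Host *shost,
			       struct scsi_cmnd *scmnd)
{
	scmnd->result = DID_NO_CONNECT << 16;
	scmnd->scsi_done(scmnd);
	return 0;
}

static struct scsi_host_template sketch_template = {
	.module		= THIS_MODULE,
	.name		= "sketch",
	.queuecommand	= sketch_queuecommand,
	.can_queue	= 64,
	.this_id	= -1,
	.use_blk_tags	= 1,	/* let the block layer assign the tags */
};

static int sketch_setup(struct device *dev)
{
	struct Scsi_Host *shost;
	int ret;

	shost = scsi_host_alloc(&sketch_template, 0);
	if (!shost)
		return -ENOMEM;

	/* One tag map shared by all devices of this host, sized to the
	 * host queue depth, mirroring the srp_create_target() change. */
	ret = scsi_init_shared_tag_map(shost, shost->can_queue);
	if (ret)
		goto put_host;

	ret = scsi_add_host(shost, dev);
	if (ret)
		goto put_host;

	scsi_scan_host(shost);
	return 0;

put_host:
	scsi_host_put(shost);
	return ret;
}
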