@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
111
111
return & fnic -> io_req_lock [hash ];
112
112
}
113
113
114
/*
 * fnic_io_lock_tag - look up the per-IO spinlock for a request by tag.
 *
 * Companion to fnic_io_lock_hash(): hashes the SCSI command tag directly
 * into the fnic->io_req_lock array, so the lock can be taken *before*
 * scsi_host_find_tag() resolves the tag to a scsi_cmnd.
 *
 * NOTE(review): the mask (tag & (FNIC_IO_LOCKS - 1)) is only a valid
 * modulo-style hash if FNIC_IO_LOCKS is a power of two — confirm against
 * its definition in the driver header.
 */
static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					   int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
114
120
/*
115
121
* Unmap the data buffer and sense buffer for an io_req,
116
122
* also unmap and free the device-private scatter/gather list.
@@ -956,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
956
962
spin_unlock_irqrestore (io_lock , flags );
957
963
return ;
958
964
}
959
- CMD_STATE (sc ) = FNIC_IOREQ_ABTS_COMPLETE ;
960
965
CMD_ABTS_STATUS (sc ) = hdr_status ;
961
-
962
966
CMD_FLAGS (sc ) |= FNIC_IO_ABT_TERM_DONE ;
963
967
FNIC_SCSI_DBG (KERN_DEBUG , fnic -> lport -> host ,
964
968
"abts cmpl recd. id %d status %s\n" ,
@@ -1116,7 +1120,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1116
1120
1117
1121
static void fnic_cleanup_io (struct fnic * fnic , int exclude_id )
1118
1122
{
1119
- unsigned int i ;
1123
+ int i ;
1120
1124
struct fnic_io_req * io_req ;
1121
1125
unsigned long flags = 0 ;
1122
1126
struct scsi_cmnd * sc ;
@@ -1127,12 +1131,14 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1127
1131
if (i == exclude_id )
1128
1132
continue ;
1129
1133
1134
+ io_lock = fnic_io_lock_tag (fnic , i );
1135
+ spin_lock_irqsave (io_lock , flags );
1130
1136
sc = scsi_host_find_tag (fnic -> lport -> host , i );
1131
- if (!sc )
1137
+ if (!sc ) {
1138
+ spin_unlock_irqrestore (io_lock , flags );
1132
1139
continue ;
1140
+ }
1133
1141
1134
- io_lock = fnic_io_lock_hash (fnic , sc );
1135
- spin_lock_irqsave (io_lock , flags );
1136
1142
io_req = (struct fnic_io_req * )CMD_SP (sc );
1137
1143
if ((CMD_FLAGS (sc ) & FNIC_DEVICE_RESET ) &&
1138
1144
!(CMD_FLAGS (sc ) & FNIC_DEV_RST_DONE )) {
@@ -1310,12 +1316,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1310
1316
1311
1317
for (tag = 0 ; tag < FNIC_MAX_IO_REQ ; tag ++ ) {
1312
1318
abt_tag = tag ;
1319
+ io_lock = fnic_io_lock_tag (fnic , tag );
1320
+ spin_lock_irqsave (io_lock , flags );
1313
1321
sc = scsi_host_find_tag (fnic -> lport -> host , tag );
1314
- if (!sc )
1322
+ if (!sc ) {
1323
+ spin_unlock_irqrestore (io_lock , flags );
1315
1324
continue ;
1316
-
1317
- io_lock = fnic_io_lock_hash (fnic , sc );
1318
- spin_lock_irqsave (io_lock , flags );
1325
+ }
1319
1326
1320
1327
io_req = (struct fnic_io_req * )CMD_SP (sc );
1321
1328
@@ -1426,16 +1433,19 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
1426
1433
1427
1434
for (tag = 0 ; tag < FNIC_MAX_IO_REQ ; tag ++ ) {
1428
1435
abt_tag = tag ;
1436
+ io_lock = fnic_io_lock_tag (fnic , tag );
1437
+ spin_lock_irqsave (io_lock , flags );
1429
1438
sc = scsi_host_find_tag (fnic -> lport -> host , tag );
1430
- if (!sc )
1439
+ if (!sc ) {
1440
+ spin_unlock_irqrestore (io_lock , flags );
1431
1441
continue ;
1442
+ }
1432
1443
1433
1444
cmd_rport = starget_to_rport (scsi_target (sc -> device ));
1434
- if (rport != cmd_rport )
1445
+ if (rport != cmd_rport ) {
1446
+ spin_unlock_irqrestore (io_lock , flags );
1435
1447
continue ;
1436
-
1437
- io_lock = fnic_io_lock_hash (fnic , sc );
1438
- spin_lock_irqsave (io_lock , flags );
1448
+ }
1439
1449
1440
1450
io_req = (struct fnic_io_req * )CMD_SP (sc );
1441
1451
@@ -1648,13 +1658,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1648
1658
io_req -> abts_done = NULL ;
1649
1659
1650
1660
/* fw did not complete abort, timed out */
1651
- if (CMD_STATE (sc ) == FNIC_IOREQ_ABTS_PENDING ) {
1661
+ if (CMD_ABTS_STATUS (sc ) == FCPIO_INVALID_CODE ) {
1652
1662
spin_unlock_irqrestore (io_lock , flags );
1653
1663
CMD_FLAGS (sc ) |= FNIC_IO_ABT_TERM_TIMED_OUT ;
1654
1664
ret = FAILED ;
1655
1665
goto fnic_abort_cmd_end ;
1656
1666
}
1657
1667
1668
+ CMD_STATE (sc ) = FNIC_IOREQ_ABTS_COMPLETE ;
1669
+
1658
1670
/*
1659
1671
* firmware completed the abort, check the status,
1660
1672
* free the io_req irrespective of failure or success
@@ -1753,16 +1765,17 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1753
1765
enum fnic_ioreq_state old_ioreq_state ;
1754
1766
1755
1767
for (tag = 0 ; tag < FNIC_MAX_IO_REQ ; tag ++ ) {
1768
+ io_lock = fnic_io_lock_tag (fnic , tag );
1769
+ spin_lock_irqsave (io_lock , flags );
1756
1770
sc = scsi_host_find_tag (fnic -> lport -> host , tag );
1757
1771
/*
1758
1772
* ignore this lun reset cmd or cmds that do not belong to
1759
1773
* this lun
1760
1774
*/
1761
- if (!sc || sc == lr_sc || sc -> device != lun_dev )
1775
+ if (!sc || sc == lr_sc || sc -> device != lun_dev ) {
1776
+ spin_unlock_irqrestore (io_lock , flags );
1762
1777
continue ;
1763
-
1764
- io_lock = fnic_io_lock_hash (fnic , sc );
1765
- spin_lock_irqsave (io_lock , flags );
1778
+ }
1766
1779
1767
1780
io_req = (struct fnic_io_req * )CMD_SP (sc );
1768
1781
@@ -1791,6 +1804,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1791
1804
spin_unlock_irqrestore (io_lock , flags );
1792
1805
continue ;
1793
1806
}
1807
+
1808
+ if (io_req -> abts_done )
1809
+ shost_printk (KERN_ERR , fnic -> lport -> host ,
1810
+ "%s: io_req->abts_done is set state is %s\n" ,
1811
+ __func__ , fnic_ioreq_state_to_str (CMD_STATE (sc )));
1794
1812
old_ioreq_state = CMD_STATE (sc );
1795
1813
/*
1796
1814
* Any pending IO issued prior to reset is expected to be
@@ -1801,11 +1819,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1801
1819
*/
1802
1820
CMD_STATE (sc ) = FNIC_IOREQ_ABTS_PENDING ;
1803
1821
1804
- if (io_req -> abts_done )
1805
- shost_printk (KERN_ERR , fnic -> lport -> host ,
1806
- "%s: io_req->abts_done is set state is %s\n" ,
1807
- __func__ , fnic_ioreq_state_to_str (CMD_STATE (sc )));
1808
-
1809
1822
BUG_ON (io_req -> abts_done );
1810
1823
1811
1824
abt_tag = tag ;
@@ -1858,12 +1871,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
1858
1871
io_req -> abts_done = NULL ;
1859
1872
1860
1873
/* if abort is still pending with fw, fail */
1861
- if (CMD_STATE (sc ) == FNIC_IOREQ_ABTS_PENDING ) {
1874
+ if (CMD_ABTS_STATUS (sc ) == FCPIO_INVALID_CODE ) {
1862
1875
spin_unlock_irqrestore (io_lock , flags );
1863
1876
CMD_FLAGS (sc ) |= FNIC_IO_ABT_TERM_DONE ;
1864
1877
ret = 1 ;
1865
1878
goto clean_pending_aborts_end ;
1866
1879
}
1880
+ CMD_STATE (sc ) = FNIC_IOREQ_ABTS_COMPLETE ;
1867
1881
CMD_SP (sc ) = NULL ;
1868
1882
spin_unlock_irqrestore (io_lock , flags );
1869
1883
@@ -2061,8 +2075,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
2061
2075
spin_unlock_irqrestore (io_lock , flags );
2062
2076
int_to_scsilun (sc -> device -> lun , & fc_lun );
2063
2077
/*
2064
- * Issue abort and terminate on the device reset request.
2065
- * If q'ing of the abort fails, retry issue it after a delay.
2078
+ * Issue abort and terminate on device reset request.
2079
+ * If q'ing of terminate fails, retry it after a delay.
2066
2080
*/
2067
2081
while (1 ) {
2068
2082
spin_lock_irqsave (io_lock , flags );
0 commit comments