@@ -44,6 +44,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
 #include <linux/cciss_ioctl.h>
 #include <linux/string.h>
 #include <linux/bitmap.h>
@@ -212,6 +213,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
 
 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+					    struct scsi_cmnd *scmd);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type);
@@ -2010,11 +2014,17 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
 	}
 }
 
+static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
+		struct CommandList *c)
+{
+	hpsa_cmd_resolve_events(h, c);
+	cmd_tagged_free(h, c);
+}
+
 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
 		struct CommandList *c, struct scsi_cmnd *cmd)
 {
-	hpsa_cmd_resolve_events(h, c);
-	cmd_free(h, c);
+	hpsa_cmd_resolve_and_free(h, c);
 	cmd->scsi_done(cmd);
 }
 
@@ -2035,8 +2045,7 @@ static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
 	hpsa_set_scsi_cmd_aborted(cmd);
 	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
 		 c->Request.CDB, c->err_info->ScsiStatus);
-	hpsa_cmd_resolve_events(h, c);
-	cmd_free(h, c);		/* FIX-ME: change to cmd_tagged_free(h, c) */
+	hpsa_cmd_resolve_and_free(h, c);
 }
 
 static void process_ioaccel2_completion(struct ctlr_info *h,
@@ -4500,7 +4509,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
 	}
 
 	if (hpsa_scatter_gather(h, c, cmd) < 0) {	/* Fill SG list */
-		cmd_free(h, c);
+		hpsa_cmd_resolve_and_free(h, c);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 	enqueue_cmd_and_start_io(h, c);
@@ -4546,6 +4555,8 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
 {
 	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
 
+	BUG_ON(c->cmdindex != index);
+
 	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
 	memset(c->err_info, 0, sizeof(*c->err_info));
 	c->busaddr = (u32) cmd_dma_handle;
@@ -4640,27 +4651,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 
 	/* Get the ptr to our adapter structure out of cmd->host. */
 	h = sdev_to_hba(cmd->device);
+
+	BUG_ON(cmd->request->tag < 0);
+
 	dev = cmd->device->hostdata;
 	if (!dev) {
 		cmd->result = DID_NO_CONNECT << 16;
 		cmd->scsi_done(cmd);
 		return 0;
 	}
-	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
-	if (unlikely(lockup_detected(h))) {
-		cmd->result = DID_NO_CONNECT << 16;
-		cmd->scsi_done(cmd);
-		return 0;
-	}
-	c = cmd_alloc(h);
+	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
 	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_NO_CONNECT << 16;
-		cmd_free(h, c);
 		cmd->scsi_done(cmd);
 		return 0;
 	}
+	c = cmd_tagged_alloc(h, cmd);
 
 	/*
 	 * Call alternate submit routine for I/O accelerated commands.
@@ -4673,7 +4681,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		if (rc == 0)
 			return 0;
 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
-			cmd_free(h, c);
+			hpsa_cmd_resolve_and_free(h, c);
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
@@ -4787,15 +4795,23 @@ static int hpsa_register_scsi(struct ctlr_info *h)
 	sh->hostdata[0] = (unsigned long) h;
 	sh->irq = h->intr[h->intr_mode];
 	sh->unique_id = sh->irq;
+	error = scsi_init_shared_tag_map(sh, sh->can_queue);
+	if (error) {
+		dev_err(&h->pdev->dev,
+			"%s: scsi_init_shared_tag_map failed for controller %d\n",
+			__func__, h->ctlr);
+		goto fail_host_put;
+	}
 	error = scsi_add_host(sh, &h->pdev->dev);
-	if (error)
+	if (error) {
+		dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
+			__func__, h->ctlr);
 		goto fail_host_put;
+	}
 	scsi_scan_host(sh);
 	return 0;
 
 fail_host_put:
-	dev_err(&h->pdev->dev, "%s: scsi_add_host"
-		" failed for controller %d\n", __func__, h->ctlr);
 	scsi_host_put(sh);
 	return error;
 fail:
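
scsi_init_shared_tag_map() (present in kernels of this era; later removed once blk-mq tagging became the default) asks the SCSI midlayer to draw request tags from a single host-wide space of sh->can_queue entries, so a tag uniquely identifies an outstanding command across every device on the host rather than per-device. That host-wide uniqueness is what lets the driver use the tag as a direct index into one command table. A rough stand-alone model of the idea, with all names and sizes invented for illustration:

#include <stdio.h>

#define CAN_QUEUE 64			/* assumed host queue depth */

static unsigned char tag_in_use[CAN_QUEUE];	/* stand-in for the shared tag map */

/* One tag space for the whole host: whichever device issues the request,
 * the tag it receives is unique among all commands currently in flight. */
static int host_tag_get(void)
{
        int i;

        for (i = 0; i < CAN_QUEUE; i++)
                if (!tag_in_use[i]) {
                        tag_in_use[i] = 1;
                        return i;
                }
        return -1;			/* host queue full */
}

static void host_tag_put(int tag)
{
        tag_in_use[tag] = 0;
}

int main(void)
{
        int a = host_tag_get();		/* e.g. a request to LUN 0 */
        int b = host_tag_get();		/* e.g. a request to LUN 1 */

        printf("tags %d and %d never collide, so either can index a host-wide table\n",
               a, b);
        host_tag_put(a);
        host_tag_put(b);
        return 0;
}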
@@ -4804,6 +4820,23 @@ static int hpsa_register_scsi(struct ctlr_info *h)
 	return -ENOMEM;
 }
 
+/*
+ * The block layer has already gone to the trouble of picking out a unique,
+ * small-integer tag for this request.  We use an offset from that value as
+ * an index to select our command block.  (The offset allows us to reserve the
+ * low-numbered entries for our own uses.)
+ */
+static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
+{
+	int idx = scmd->request->tag;
+
+	if (idx < 0)
+		return idx;
+
+	/* Offset to leave space for internal cmds. */
+	return idx += HPSA_NRESERVED_CMDS;
+}
+
 /*
  * Send a TEST_UNIT_READY command to the specified LUN using the specified
  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
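
The mapping is worth making concrete: block-layer tags land on pool entries HPSA_NRESERVED_CMDS and above, leaving the low entries for driver-initiated commands, and for the bounds check in cmd_tagged_alloc() (further down) to hold, the host's advertised queue depth presumably must not exceed nr_cmds - HPSA_NRESERVED_CMDS. A minimal sketch, with assumed values for the two sizes:

#include <stdio.h>

#define HPSA_NRESERVED_CMDS	16	/* assumed value, illustration only */
#define NR_CMDS			256	/* assumed total command-pool size */

/* Mirrors hpsa_get_cmd_index(): block-layer tag -> command-pool index. */
static int cmd_index_from_tag(int tag)
{
        if (tag < 0)
                return tag;		/* invalid tags propagate unchanged */
        return tag + HPSA_NRESERVED_CMDS;
}

int main(void)
{
        /* Pool entries [0, 16) stay reserved for internal commands; tags
         * [0, 240) from the block layer map onto entries [16, 256). */
        printf("tag 0   -> pool index %d\n", cmd_index_from_tag(0));
        printf("tag 239 -> pool index %d\n", cmd_index_from_tag(239));
        return 0;
}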
@@ -4925,6 +4958,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 	int rc;
 	struct ctlr_info *h;
 	struct hpsa_scsi_dev_t *dev;
+	char msg[40];
 
 	/* find the controller to which the command to be aborted was sent */
 	h = sdev_to_hba(scsicmd->device);
@@ -4943,19 +4977,17 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 
 	/* if controller locked up, we can guarantee command won't complete */
 	if (lockup_detected(h)) {
-		dev_warn(&h->pdev->dev,
-			"scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
-			h->scsi_host->host_no, dev->bus, dev->target,
-			dev->lun);
+		sprintf(msg, "cmd %d RESET FAILED, lockup detected",
+			hpsa_get_cmd_index(scsicmd));
+		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 		return FAILED;
 	}
 
 	/* this reset request might be the result of a lockup; check */
 	if (detect_controller_lockup(h)) {
-		dev_warn(&h->pdev->dev,
-			"scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
-			h->scsi_host->host_no, dev->bus, dev->target,
-			dev->lun);
+		sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
+			hpsa_get_cmd_index(scsicmd));
+		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 		return FAILED;
 	}
 
@@ -5398,6 +5430,58 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
 	return !lockup_detected(h) ? SUCCESS : FAILED;
 }
 
+/*
+ * For operations with an associated SCSI command, a command block is allocated
+ * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
+ * block request tag as an index into a table of entries.  cmd_tagged_free() is
+ * the complement, although cmd_free() may be called instead.
+ */
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+					    struct scsi_cmnd *scmd)
+{
+	int idx = hpsa_get_cmd_index(scmd);
+	struct CommandList *c = h->cmd_pool + idx;
+
+	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
+		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
+			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
+		/* The index value comes from the block layer, so if it's out of
+		 * bounds, it's probably not our bug.
+		 */
+		BUG();
+	}
+
+	atomic_inc(&c->refcount);
+	if (unlikely(!hpsa_is_cmd_idle(c))) {
+		/*
+		 * We expect that the SCSI layer will hand us a unique tag
+		 * value.  Thus, there should never be a collision here between
+		 * two requests...because if the selected command isn't idle
+		 * then someone is going to be very disappointed.
+		 */
+		dev_err(&h->pdev->dev,
+			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
+			idx);
+		if (c->scsi_cmd != NULL)
+			scsi_print_command(c->scsi_cmd);
+		scsi_print_command(scmd);
+	}
+
+	hpsa_cmd_partial_init(h, idx, c);
+	return c;
+}
+
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
+{
+	/*
+	 * Release our reference to the block.  We don't need to do anything
+	 * else to free it, because it is accessed by index.  (There's no point
+	 * in checking the result of the decrement, since we cannot guarantee
+	 * that there isn't a concurrent abort which is also accessing it.)
+	 */
+	(void)atomic_dec(&c->refcount);
+}
+
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
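
The payoff of the tag scheme is that the I/O hot path becomes a direct array lookup plus one atomic increment, where cmd_alloc() below must still search a bitmap. A stripped-down, stand-alone model of that lifecycle (C11 atomics in place of the kernel's atomic_t, and a bare refcount in place of the driver's hpsa_is_cmd_idle() test):

#include <stdatomic.h>
#include <stdio.h>

struct cmd {
        atomic_int refcount;	/* 0 == idle, in this simplified model */
};

/* Model of cmd_tagged_alloc(): the tag-derived index selects the block
 * directly; taking a reference marks it busy.  No search, no retry loop. */
static struct cmd *tagged_alloc(struct cmd *pool, int idx)
{
        struct cmd *c = &pool[idx];

        if (atomic_fetch_add(&c->refcount, 1) != 0)
                fprintf(stderr, "tag collision at index %d\n", idx);
        return c;
}

/* Model of cmd_tagged_free(): dropping the reference is all there is to
 * do, because the slot is addressed by index rather than tracked in a
 * bitmap or free list. */
static void tagged_free(struct cmd *c)
{
        atomic_fetch_sub(&c->refcount, 1);
}

int main(void)
{
        struct cmd pool[4] = { 0 };
        struct cmd *c = tagged_alloc(pool, 2);

        tagged_free(c);
        printf("index 2 idle again: %s\n",
               atomic_load(&pool[2].refcount) == 0 ? "yes" : "no");
        return 0;
}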
@@ -5411,7 +5495,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 {
 	struct CommandList *c;
 	int refcount, i;
-	unsigned long offset;
+	int offset = 0;
 
 	/*
 	 * There is some *extremely* small but non-zero chance that
@@ -5423,31 +5507,44 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	 * very unlucky thread might be starved anyway, never able to
 	 * beat the other threads.  In reality, this happens so
 	 * infrequently as to be indistinguishable from never.
+	 *
+	 * Note that we start allocating commands before the SCSI host structure
+	 * is initialized.  Since the search starts at bit zero, this
+	 * all works, since we have at least one command structure available;
+	 * however, it means that the structures with the low indexes have to be
+	 * reserved for driver-initiated requests, while requests from the block
+	 * layer will use the higher indexes.
 	 */
 
-	offset = h->last_allocation; /* benignly racy */
 	for (;;) {
-		i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
-		if (unlikely(i == h->nr_cmds)) {
+		i = find_next_zero_bit(h->cmd_pool_bits,
+				       HPSA_NRESERVED_CMDS,
+				       offset);
+		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
 			offset = 0;
 			continue;
 		}
 		c = h->cmd_pool + i;
 		refcount = atomic_inc_return(&c->refcount);
 		if (unlikely(refcount > 1)) {
 			cmd_free(h, c); /* already in use */
-			offset = (i + 1) % h->nr_cmds;
+			offset = (i + 1) % HPSA_NRESERVED_CMDS;
 			continue;
 		}
 		set_bit(i & (BITS_PER_LONG - 1),
 			h->cmd_pool_bits + (i / BITS_PER_LONG));
 		break; /* it's ours now. */
 	}
-	h->last_allocation = i; /* benignly racy */
 	hpsa_cmd_partial_init(h, i, c);
 	return c;
 }
 
+/*
+ * This is the complementary operation to cmd_alloc().  Note, however, in some
+ * corner cases it may also be used to free blocks allocated by
+ * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
+ * the clear-bit is harmless.
+ */
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
 	if (atomic_dec_and_test(&c->refcount)) {
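
After this change the bitmap search serves only the small reserved region; block-layer traffic never touches it, so the h->last_allocation hint could be dropped. A stand-alone approximation of the reworked loop (a plain scan in place of find_next_zero_bit(), C11 atomics in place of atomic_inc_return(), and an assumed pool size):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NRESERVED 16	/* stand-in for HPSA_NRESERVED_CMDS */

static atomic_int refcount[NRESERVED];
static bool pool_bits[NRESERVED];	/* stand-in for h->cmd_pool_bits */

/* Approximates the reworked cmd_alloc(): search only the reserved region,
 * starting from a local offset, and use the refcount to back off gracefully
 * when a concurrent holder (e.g. an abort) got to the slot first. */
static int reserved_alloc(void)
{
        int offset = 0;

        for (;;) {
                int i = offset;

                while (i < NRESERVED && pool_bits[i])
                        i++;	/* find_next_zero_bit() stand-in */
                if (i >= NRESERVED) {
                        offset = 0;
                        continue;	/* spins until a slot frees up */
                }
                if (atomic_fetch_add(&refcount[i], 1) > 0) {
                        atomic_fetch_sub(&refcount[i], 1);	/* already in use */
                        offset = (i + 1) % NRESERVED;
                        continue;
                }
                pool_bits[i] = true;	/* it's ours now */
                return i;
        }
}

int main(void)
{
        printf("first reserved slot: %d\n", reserved_alloc());
        printf("next reserved slot:  %d\n", reserved_alloc());
        return 0;
}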