@@ -238,7 +238,6 @@ struct skd_device {
 	enum skd_drvr_state state;
 	u32 drive_state;
 
-	atomic_t in_flight;
 	u32 cur_max_queue_depth;
 	u32 queue_low_water_mark;
 	u32 dev_max_queue_depth;
@@ -391,6 +390,22 @@ static void skd_log_skreq(struct skd_device *skdev,
  * READ/WRITE REQUESTS
  *****************************************************************************
  */
+static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
+{
+	int *count = data;
+
+	(*count)++;
+}
+
+static int skd_in_flight(struct skd_device *skdev)
+{
+	int count = 0;
+
+	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
+
+	return count;
+}
+
 static void
 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
 		int data_dir, unsigned lba,
@@ -567,9 +582,8 @@ static void skd_process_request(struct request *req, bool last)
 	skmsg->length += sizeof(struct skd_scsi_request);
 	fmh->num_protocol_cmds_coalesced++;
 
-	atomic_inc(&skdev->in_flight);
 	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
-		atomic_read(&skdev->in_flight));
+		skd_in_flight(skdev));
 
 	/*
 	 * If the FIT msg buffer is full send it.
@@ -1218,7 +1232,7 @@ static void skd_send_fitmsg(struct skd_device *skdev,
 	u64 qcmd;
 
 	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
-		skmsg->mb_dma_address, atomic_read(&skdev->in_flight));
+		skmsg->mb_dma_address, skd_in_flight(skdev));
 	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
 
 	qcmd = skmsg->mb_dma_address;
@@ -1451,13 +1465,6 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 static void skd_release_skreq(struct skd_device *skdev,
 			      struct skd_request_context *skreq)
 {
-	/*
-	 * Decrease the number of active requests.
-	 * Also decrements the count in the timeout slot.
-	 */
-	SKD_ASSERT(atomic_read(&skdev->in_flight) > 0);
-	atomic_dec(&skdev->in_flight);
-
 	/*
 	 * Reclaim the skd_request_context
 	 */
@@ -1498,7 +1505,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 		dev_dbg(&skdev->pdev->dev,
 			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
 			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
-			cmp_cntxt, cmp_status, atomic_read(&skdev->in_flight),
+			cmp_cntxt, cmp_status, skd_in_flight(skdev),
 			cmp_bytes, skdev->proto_ver);
 
 		if (cmp_cycle != skdev->skcomp_cycle) {
@@ -1590,7 +1597,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 	}
 
 	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
-	    atomic_read(&skdev->in_flight) == 0) {
+	    skd_in_flight(skdev) == 0) {
 		skdev->state = SKD_DRVR_STATE_PAUSED;
 		wake_up_interruptible(&skdev->waitq);
 	}
@@ -1929,8 +1936,6 @@ static void skd_recover_request(struct request *req, void *data, bool reserved)
 static void skd_recover_requests(struct skd_device *skdev)
 {
 	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
-
-	atomic_set(&skdev->in_flight, 0);
 }
 
 static void skd_isr_msg_from_dev(struct skd_device *skdev)
@@ -3560,7 +3565,7 @@ static void skd_log_skdev(struct skd_device *skdev, const char *event)
 		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
 		skd_skdev_state_to_str(skdev->state), skdev->state);
 	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
-		atomic_read(&skdev->in_flight), skdev->cur_max_queue_depth,
+		skd_in_flight(skdev), skdev->cur_max_queue_depth,
 		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
 	dev_dbg(&skdev->pdev->dev, "  cycle=%d cycle_ix=%d\n",
 		skdev->skcomp_cycle, skdev->skcomp_ix);
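
For context on the pattern this patch switches to: rather than keeping a private atomic counter in sync on every submit, completion, and recovery path, the driver now asks blk-mq to walk the busy tags whenever it needs a snapshot of the in-flight count. The sketch below is not part of the patch; it shows the same technique against a hypothetical driver structure (my_dev, my_count_one, and my_in_flight are made-up names) and assumes the busy_tag_iter_fn signature used by kernels of this era, where the callback returns void (newer kernels expect a bool return).

#include <linux/blk-mq.h>

/* Hypothetical driver-private structure embedding the blk-mq tag set. */
struct my_dev {
	struct blk_mq_tag_set tag_set;
};

/* Invoked by blk_mq_tagset_busy_iter() once for every in-flight request. */
static void my_count_one(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

/* Return a point-in-time count of requests the device currently owns. */
static int my_in_flight(struct my_dev *dev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&dev->tag_set, my_count_one, &count);

	return count;
}

The trade-off is an O(queue-depth) walk of the tag set per query instead of per-request atomic updates; that is acceptable in this driver because every remaining caller sits on a debug or slow path.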