@@ -2431,18 +2431,33 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
	}
}

+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+			   unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	int ret = 0;
+	bool aborted = false, tas = false;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
-			transport_wait_for_tasks(cmd);
+			target_wait_free_cmd(cmd, &aborted, &tas);

-		ret = transport_put_cmd(cmd);
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
	} else {
		if (wait_for_tasks)
-			transport_wait_for_tasks(cmd);
+			target_wait_free_cmd(cmd, &aborted, &tas);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
@@ -2454,7 +2469,20 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

-		ret = transport_put_cmd(cmd);
+		if (!aborted || tas)
+			ret = transport_put_cmd(cmd);
+	}
+	/*
+	 * If the task has been internally aborted due to TMR ABORT_TASK
+	 * or LUN_RESET, target_core_tmr.c is responsible for performing
+	 * the remaining calls to target_put_sess_cmd(), and not the
+	 * callers of this function.
+	 */
+	if (aborted) {
+		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+		wait_for_completion(&cmd->cmd_wait_comp);
+		cmd->se_tfo->release_cmd(cmd);
+		ret = 1;
	}
	return ret;
}
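For orientation, a minimal caller-side sketch (not part of the patch; the helper name is hypothetical, and kernel context with the target-core headers is assumed) of how the new return convention of transport_generic_free_cmd() would be consumed: a non-zero return means the abort path has already waited on cmd_wait_comp and handed the descriptor to ->release_cmd(), so the caller must not touch the se_cmd afterwards.

/*
 * Hypothetical fabric-side helper, illustration only.  Assumes kernel
 * context with <target/target_core_base.h> and
 * <target/target_core_fabric.h> available.
 */
static void example_fabric_free_cmd(struct se_cmd *cmd)
{
	/*
	 * With this patch, a return of 1 means the command was aborted
	 * internally and already released via cmd->se_tfo->release_cmd(),
	 * so cmd must not be dereferenced after this call.
	 */
	if (transport_generic_free_cmd(cmd, 1))
		pr_debug("example fabric: se_cmd released on abort path\n");
}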
@@ -2509,6 +2537,7 @@ static void target_release_cmd_kref(struct kref *kref)
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
+	bool fabric_stop;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
@@ -2517,13 +2546,19 @@ static void target_release_cmd_kref(struct kref *kref)
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
-	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+	spin_lock(&se_cmd->t_state_lock);
+	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+	spin_unlock(&se_cmd->t_state_lock);
+
+	if (se_cmd->cmd_wait_set || fabric_stop) {
+		list_del_init(&se_cmd->se_cmd_list);
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		target_free_cmd_mem(se_cmd);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
-	list_del(&se_cmd->se_cmd_list);
+	list_del_init(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	target_free_cmd_mem(se_cmd);
@@ -2555,6 +2590,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
+	int rc;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
@@ -2564,8 +2600,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

-	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
-		se_cmd->cmd_wait_set = 1;
+	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+		if (rc) {
+			se_cmd->cmd_wait_set = 1;
+			spin_lock(&se_cmd->t_state_lock);
+			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+			spin_unlock(&se_cmd->t_state_lock);
+		}
+	}

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
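As a minimal sketch of the intended caller-side ordering (the two exported functions are real target-core symbols shown in this patch; the surrounding shutdown routine is hypothetical), a fabric driver pairs the two calls during session teardown: target_sess_cmd_list_set_waiting() now pins each outstanding command via kref_get_unless_zero() and marks it CMD_T_FABRIC_STOP, and target_wait_for_sess_cmds() later drops those extra references and waits on each cmd_wait_comp.

/*
 * Hypothetical fabric session-shutdown path, illustration only; assumes
 * kernel context with <target/target_core_fabric.h> available.
 */
static void example_fabric_close_session(struct se_session *se_sess)
{
	/* Mark outstanding commands and take an extra kref on each. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Drop the extra references and wait for each cmd_wait_comp. */
	target_wait_for_sess_cmds(se_sess);
}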
@@ -2578,15 +2621,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;
+	bool tas;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
-		list_del(&se_cmd->se_cmd_list);
+		list_del_init(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		tas = (se_cmd->transport_state & CMD_T_TAS);
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		if (!target_put_sess_cmd(se_cmd)) {
+			if (tas)
+				target_put_sess_cmd(se_cmd);
+		}
+
		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			" fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2608,53 +2661,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
	wait_for_completion(&lun->lun_ref_comp);
}

-/**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd: command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
- */
-bool transport_wait_for_tasks(struct se_cmd *cmd)
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+			   bool *aborted, bool *tas, unsigned long *flags)
+	__releases(&cmd->t_state_lock)
+	__acquires(&cmd->t_state_lock)
{
-	unsigned long flags;

-	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	assert_spin_locked(&cmd->t_state_lock);
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (fabric_stop)
+		cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+	if (cmd->transport_state & CMD_T_ABORTED)
+		*aborted = true;
+
+	if (cmd->transport_state & CMD_T_TAS)
+		*tas = true;
+
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;
-	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;
-	}

-	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	if (!(cmd->transport_state & CMD_T_ACTIVE))
+		return false;
+
+	if (fabric_stop && *aborted)
		return false;
-	}

	cmd->transport_state |= CMD_T_STOP;

-	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
-		cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+		" t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

-	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

-	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
-		cmd->tag);
+	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+		"t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

+	return true;
+}
+
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait
+ *
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	bool ret, aborted = false, tas = false;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

-	return true;
+	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

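The exported wrapper preserves the original locking contract for fabric callers by taking and dropping t_state_lock around the new locked helper. A minimal, hypothetical fabric-side use of the exported symbol follows (only transport_wait_for_tasks() itself is from the patch; the surrounding helper is an illustration assuming kernel context with the target-core headers).

/*
 * Hypothetical quiesce helper in a fabric driver, illustration only.
 * Per the kernel-doc above, transport_wait_for_tasks() is called from
 * fabric context to let the storage engine pause the command before
 * fabric-side teardown proceeds.
 */
static void example_fabric_quiesce_cmd(struct se_cmd *cmd)
{
	if (transport_wait_for_tasks(cmd))
		pr_debug("example fabric: se_cmd %p was quiesced\n", cmd);
}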