@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
+		struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
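The two changed declarations above preview the rest of the patch. Restating the new helper prototype with its parameter semantics spelled out as comments (the annotations are inferred from the hunks below; they are not part of the patch itself):

	/*
	 * err           - raw return value of the fabric callback that just failed
	 * write_pending - true when the failing callback was ->write_pending(), so
	 *                 a retry must resume at the WRITE data phase (QF_WP)
	 *                 rather than at the data-in/status phase (QF_OK)
	 */
	static void transport_handle_queue_full(struct se_cmd *cmd,
			struct se_device *dev, int err, bool write_pending);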
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
 		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 			transport_write_pending_qf(cmd);
-		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 			transport_complete_qf(cmd);
 	}
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		}
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		goto check_stop;
 	default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	}
 
 	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
 	int ret = 0;
 
 	transport_complete_task_attr(cmd);
+	/*
+	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried. Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non-zero, always
+	 * keep retrying no matter what.
+	 */
+	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+		if (cmd->scsi_status)
+			goto queue_status;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo->queue_status(cmd);
-		goto out;
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		goto queue_status;
 	}
 
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+		goto queue_status;
+
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
 		if (cmd->scsi_status)
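The comment block added above encodes a priority: a command that already carries a scsi_status keeps its original status and sense data on resend, and CHECK_CONDITION with LOGICAL UNIT COMMUNICATION FAILURE is synthesized only when no failure status was ever set. A minimal standalone model of that entry decision (userspace C for illustration, not kernel code; it deliberately omits the SCF_TRANSPORT_TASK_SENSE check shown above):

	#include <stdbool.h>
	#include <stdio.h>

	enum qf_action {
		RETRY_DATA_TRANSFER,		/* legacy -EAGAIN/-ENOMEM retry path */
		RESEND_EXISTING_STATUS,		/* keep original status and sense   */
		EMULATE_CHECK_CONDITION,	/* synthesize LU COMM FAILURE       */
	};

	/* qf_err: command was parked as TRANSPORT_COMPLETE_QF_ERR
	 * status_set: cmd->scsi_status already holds a failure status */
	static enum qf_action complete_qf_entry(bool qf_err, bool status_set)
	{
		if (qf_err)
			return status_set ? RESEND_EXISTING_STATUS :
					    EMULATE_CHECK_CONDITION;
		return RETRY_DATA_TRANSFER;
	}

	int main(void)
	{
		/* prints: 1 2 0 */
		printf("%d %d %d\n",
		       complete_qf_entry(true, true),
		       complete_qf_entry(true, false),
		       complete_qf_entry(false, false));
		return 0;
	}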
@@ -2007,19 +2024,33 @@ static void transport_complete_qf(struct se_cmd *cmd)
 		break;
 	}
 
-out:
 	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		return;
 	}
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+					int err, bool write_pending)
 {
+	/*
+	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+	 * ->queue_data_in() callbacks from new process context.
+	 *
+	 * Otherwise for other errors, transport_complete_qf() will send
+	 * CHECK_CONDITION via ->queue_status() instead of attempting to
+	 * retry associated fabric driver data-transfer callbacks.
+	 */
+	if (err == -EAGAIN || err == -ENOMEM) {
+		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+						 TRANSPORT_COMPLETE_QF_OK;
+	} else {
+		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+	}
+
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc_mb(&dev->dev_qf_count);
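The classification added above is the heart of the patch: only -EAGAIN and -ENOMEM keep the legacy retry semantics, split by which phase failed, while every other error value is parked as TRANSPORT_COMPLETE_QF_ERR so that target_qf_do_work() later drains it through the CHECK_CONDITION path instead of re-invoking the failed callback. A self-contained userspace model of the mapping (illustrative only):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum qf_state { QF_WP, QF_OK, QF_ERR };

	/* Mirrors the branch added to transport_handle_queue_full() above. */
	static enum qf_state classify_qf(int err, bool write_pending)
	{
		if (err == -EAGAIN || err == -ENOMEM)	/* transient: retry later */
			return write_pending ? QF_WP : QF_OK;
		return QF_ERR;	/* unknown/fatal: emulate CHECK_CONDITION */
	}

	int main(void)
	{
		/* prints: 0 1 2 2 */
		printf("%d %d %d %d\n",
		       classify_qf(-EAGAIN, true),	/* retry ->write_pending() */
		       classify_qf(-ENOMEM, false),	/* retry data-in or status */
		       classify_qf(-EIO, false),	/* fatal for data-in       */
		       classify_qf(-EINVAL, true));	/* fatal even mid-write    */
		return 0;
	}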
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
 			WARN_ON(!cmd->scsi_status);
 			ret = transport_send_check_condition_and_sense(
 					cmd, 0, 1);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		} else if (rc) {
 			ret = transport_send_check_condition_and_sense(cmd,
 					rc, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		if (target_read_prot_action(cmd)) {
 			ret = transport_send_check_condition_and_sense(cmd,
 					cmd->pi_err, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ static void target_complete_ok_work(struct work_struct *work)
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ static void target_complete_ok_work(struct work_struct *work)
 			atomic_long_add(cmd->data_length,
 					&cmd->se_lun->lun_stats.tx_data_octets);
 			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 			break;
 		}
@@ -2166,7 +2197,7 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	default:
@@ -2180,8 +2211,8 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
-	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-	WARN_ON(ret);
-
-	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
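Together with the queue-full handler above, this gives fabric drivers a simple return contract for ->write_pending(). A hedged sketch of a hypothetical fabric implementation (all example_* names are invented for illustration; no real fabric driver is shown):

	static int example_fabric_write_pending(struct se_cmd *cmd)
	{
		/* hypothetical per-connection context lookup */
		struct example_conn *conn = example_conn_from_cmd(cmd);

		if (!example_have_xmit_credits(conn))
			/* transient: parked as TRANSPORT_COMPLETE_QF_WP and
			 * retried from process context via target_qf_do_work() */
			return -EAGAIN;

		if (example_conn_is_dead(conn))
			/* any other error is now treated as fatal: parked as
			 * TRANSPORT_COMPLETE_QF_ERR, surfaced as CHECK_CONDITION */
			return -EIO;

		example_solicit_write_data(conn, cmd);	/* e.g. send R2T/XFER_RDY */
		return 0;
	}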
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 	int ret;
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
+	if (ret) {
 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
 			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	}
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	__releases(&cmd->t_state_lock)
 	__acquires(&cmd->t_state_lock)
 {
+	int ret;
+
 	assert_spin_locked(&cmd->t_state_lock);
 	WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 		trace_target_cmd_complete(cmd);
 
 		spin_unlock_irq(&cmd->t_state_lock);
-		cmd->se_tfo->queue_status(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret)
+			transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		spin_lock_irq(&cmd->t_state_lock);
 
 		return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
 		 cmd->t_task_cdb[0], cmd->tag);
 
 	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)