@@ -1047,6 +1047,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * __spi_pump_messages - function which processes spi message queue
  * @master: master to process queue for
  * @in_kthread: true if we are in the context of the message pump thread
+ * @bus_locked: true if the bus mutex is held when calling this function
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
@@ -1056,7 +1057,8 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
+				bool bus_locked)
 {
 	unsigned long flags;
 	bool was_busy = false;
@@ -1152,7 +1154,9 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 		}
 	}
 
-	mutex_lock(&master->bus_lock_mutex);
+	if (!bus_locked)
+		mutex_lock(&master->bus_lock_mutex);
+
 	trace_spi_message_start(master->cur_msg);
 
 	if (master->prepare_message) {
@@ -1162,8 +1166,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 				"failed to prepare message: %d\n", ret);
 			master->cur_msg->status = ret;
 			spi_finalize_current_message(master);
-			mutex_unlock(&master->bus_lock_mutex);
-			return;
+			goto out;
 		}
 		master->cur_msg_prepared = true;
 	}
@@ -1172,21 +1175,23 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 	if (ret) {
 		master->cur_msg->status = ret;
 		spi_finalize_current_message(master);
-		mutex_unlock(&master->bus_lock_mutex);
-		return;
+		goto out;
 	}
 
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
 			"failed to transfer one message from queue\n");
-		mutex_unlock(&master->bus_lock_mutex);
-		return;
+		goto out;
 	}
-	mutex_unlock(&master->bus_lock_mutex);
+
+out:
+	if (!bus_locked)
+		mutex_unlock(&master->bus_lock_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
-	cond_resched();
+	if (!ret)
+		cond_resched();
 }
 
 /**
@@ -1198,7 +1203,7 @@ static void spi_pump_messages(struct kthread_work *work)
 	struct spi_master *master =
 		container_of(work, struct spi_master, pump_messages);
 
-	__spi_pump_messages(master, true);
+	__spi_pump_messages(master, true, false);
 }
 
 static int spi_init_queue(struct spi_master *master)
@@ -2479,7 +2484,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 					    spi_sync_immediate);
 		SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
 					       spi_sync_immediate);
-		__spi_pump_messages(master, false);
+		__spi_pump_messages(master, false, bus_locked);
 	}
 
 	wait_for_completion(&done);
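
A minimal sketch of the caller side implied by the new bus_locked argument, for context only. It assumes the mainline spi_sync()/spi_sync_locked() wrappers around __spi_sync(), which are not part of this diff: spi_sync() enters with the bus mutex free, while spi_sync_locked() runs inside a spi_bus_lock()/spi_bus_unlock() region where the caller already holds master->bus_lock_mutex. Forwarding that state into __spi_pump_messages() lets it skip the mutex_lock() that previously deadlocked against such a caller.

int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	/* Bus mutex not held: __spi_pump_messages() may take it itself. */
	return __spi_sync(spi, message, 0);
}

int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	/* Caller already holds master->bus_lock_mutex via spi_bus_lock(). */
	return __spi_sync(spi, message, 1);
}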