@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
                                struct lock_class_key *key)
 {
        memset(worker, 0, sizeof(struct kthread_worker));
-       spin_lock_init(&worker->lock);
+       raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
-               spin_lock_irq(&worker->lock);
+               raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
-               spin_unlock_irq(&worker->lock);
+               raw_spin_unlock_irq(&worker->lock);
                return 0;
        }
 
        work = NULL;
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (work) {
                __set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
        bool ret = false;
        unsigned long flags;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        if (WARN_ON_ONCE(!worker))
                return;
 
-       spin_lock(&worker->lock);
+       raw_spin_lock(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);
 
-       spin_unlock(&worker->lock);
+       raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        bool ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }
 
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
        if (!worker)
                return;
 
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
        else
                noop = true;
 
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (!noop)
                wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
         * any queuing is blocked by setting the canceling counter.
         */
        work->canceling++;
-       spin_unlock_irqrestore(&worker->lock, *flags);
+       raw_spin_unlock_irqrestore(&worker->lock, *flags);
        del_timer_sync(&dwork->timer);
-       spin_lock_irqsave(&worker->lock, *flags);
+       raw_spin_lock_irqsave(&worker->lock, *flags);
        work->canceling--;
 }
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        int ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        /* Do not bother with canceling when never queued. */
        if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
 out:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
        if (!worker)
                goto out;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;
 
 out_fast:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
        return ret;
 }
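
The conversion is purely mechanical: every spin_lock*() operation on worker->lock becomes its raw_spin_lock*() counterpart, with the lock's declaration changing to raw_spinlock_t elsewhere in the same patch. The point is PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock and so must not be taken from hard-interrupt context, such as the timer callback kthread_delayed_work_timer_fn() modified above. Below is a minimal sketch of the same pattern; demo_ctx and demo_mark_pending are hypothetical names, not part of this commit.

/*
 * Sketch only: a raw_spinlock_t stays a busy-wait lock even on
 * PREEMPT_RT, so it can be taken from hardirq context, exactly
 * the property the kthread_worker lock needs.
 */
#include <linux/spinlock.h>

struct demo_ctx {
        raw_spinlock_t lock;    /* never sleeps, hardirq-safe */
        int pending;
};

static void demo_init(struct demo_ctx *ctx)
{
        raw_spin_lock_init(&ctx->lock);
        ctx->pending = 0;
}

/* Callable from any context, including hard interrupt handlers. */
static void demo_mark_pending(struct demo_ctx *ctx)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending++;
        raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

With a plain spinlock_t, demo_mark_pending() would be unsafe from a hardirq handler on PREEMPT_RT; the raw variant keeps the critical section non-preemptible on all configurations, at the cost of requiring it to stay short.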