
Commit fe99a4f

Julia Cartwright authored and KAGA-KOKO committed
kthread: Convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq context even when PREEMPT_RT_FULL is enabled, convert the worker spin_lock to a raw_spin_lock. This is only acceptable to do because the work performed under the lock is well-bounded and minimal.

Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de
1 parent c89d92e commit fe99a4f
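For context, the pattern this change enables looks like the sketch below: a hardirq handler queues work onto a kthread_worker. kthread_queue_work() takes worker->lock internally, so on PREEMPT_RT_FULL that lock must not be a sleeping lock, which is exactly what the conversion to raw_spinlock_t ensures. This is a minimal sketch; the device, IRQ, and all my_* names are hypothetical.

/*
 * Minimal sketch (hypothetical driver): offload interrupt work to a
 * kthread_worker. kthread_queue_work() acquires worker->lock, which
 * after this commit is a raw_spinlock_t and therefore safe to take
 * from hardirq context even on PREEMPT_RT_FULL.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

static struct kthread_worker *my_worker;	/* hypothetical */
static struct kthread_work my_work;		/* hypothetical */

static void my_work_fn(struct kthread_work *work)
{
	/* the heavy lifting runs in the worker thread, not in the IRQ */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	kthread_queue_work(my_worker, &my_work);	/* must not sleep here */
	return IRQ_HANDLED;
}

static int my_driver_init(void)
{
	my_worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);
	kthread_init_work(&my_work, my_work_fn);
	return 0;
}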

File tree

2 files changed: +23 -23 lines


include/linux/kthread.h

Lines changed: 2 additions & 2 deletions
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int flags;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct list_head work_list;
 	struct list_head delayed_work_list;
 	struct task_struct *task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
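A statically defined worker consumes this initializer through DEFINE_KTHREAD_WORKER(), which expands to KTHREAD_WORKER_INIT(); the backing thread is then started by handing the exported kthread_worker_fn() to kthread_run(). A minimal sketch, with an illustrative worker name:

#include <linux/err.h>
#include <linux/kthread.h>

/* Static definition: the raw_spinlock is initialized at compile time. */
static DEFINE_KTHREAD_WORKER(static_worker);	/* illustrative name */

static int start_static_worker(void)
{
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &static_worker, "static_worker");
	return PTR_ERR_OR_ZERO(task);
}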

kernel/kthread.c

Lines changed: 21 additions & 21 deletions
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 						struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }
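One step in the __kthread_cancel_work() hunk deserves a note: the lock is deliberately dropped around del_timer_sync(). The timer callback, kthread_delayed_work_timer_fn(), takes worker->lock itself, so waiting for a possibly running callback while holding the lock would deadlock; requeueing during the window is blocked by the canceling counter, as the context lines above say. A minimal sketch of the same pattern, with illustrative names (state_lock and my_timer are not kernel API):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_RAW_SPINLOCK(state_lock);	/* illustrative */
static struct timer_list my_timer;		/* illustrative */

static void cancel_timer_locked(unsigned long *flags)
{
	/*
	 * The timer callback also takes state_lock, so release it before
	 * synchronously waiting for the callback to finish running.
	 */
	raw_spin_unlock_irqrestore(&state_lock, *flags);
	del_timer_sync(&my_timer);
	raw_spin_lock_irqsave(&state_lock, *flags);
}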
