
Commit e2c631b

Peter Zijlstra authored and KAGA-KOKO committed
clocksource: Revert "Remove kthread"
It turns out that the silly spawn-kthread-from-worker dance was actually needed. clocksource_watchdog_kthread() cannot be called directly from clocksource_watchdog_work(), because clocksource_select() calls timekeeping_notify() which uses stop_machine(). One cannot use stop_machine() from a workqueue due to lock inversions wrt CPU hotplug.

Revert the patch but add a comment that explains why we jump through such apparently silly hoops.

Fixes: 7197e77 ("clocksource: Remove kthread")
Reported-by: Siegfried Metz <frame@mailbox.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Niklas Cassel <niklas.cassel@linaro.org>
Tested-by: Kevin Shanahan <kevin@shanahan.id.au>
Tested-by: viktor_jaegerskuepper@freenet.de
Tested-by: Siegfried Metz <frame@mailbox.org>
Cc: rafael.j.wysocki@intel.com
Cc: len.brown@intel.com
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: bjorn.andersson@linaro.org
Link: https://lkml.kernel.org/r/20180905084158.GR24124@hirez.programming.kicks-ass.net
1 parent c43c5e9 commit e2c631b
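
For readers unfamiliar with the pattern being reinstated, here is a minimal, self-contained sketch of it: a workqueue handler that does nothing but spawn a short-lived kthread, because the real work ends in stop_machine() and stop_machine() must not be called from workqueue context. Everything named example_* and do_reselect below is a hypothetical placeholder, not the actual clocksource code; kthread_run(), stop_machine(), DECLARE_WORK() and schedule_work() are the real kernel APIs involved.

/*
 * Sketch of the "bounce stop_machine() work from a workqueue into a
 * one-shot kthread" pattern. Placeholder names, not the clocksource code.
 */
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/workqueue.h>

/* Hypothetical callback executed while all CPUs are stopped. */
static int do_reselect(void *unused)
{
	return 0;
}

/* kthread body: a safe context for stop_machine(). */
static int example_kthread(void *data)
{
	stop_machine(do_reselect, NULL, NULL);
	return 0;
}

/*
 * Workqueue handler: must not call stop_machine() directly, since doing
 * so from a workqueue risks a lock inversion against CPU hotplug, so it
 * only spawns the kthread. If kthread_run() fails, a later trigger of
 * the work simply retries.
 */
static void example_work_fn(struct work_struct *work)
{
	kthread_run(example_kthread, NULL, "example_kthread");
}

static DECLARE_WORK(example_work, example_work_fn);

/* Callers just kick the work: schedule_work(&example_work); */

In the actual patch below, clocksource_watchdog_work() plays the role of example_work_fn and clocksource_watchdog_kthread() the role of example_kthread.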

File tree: 1 file changed (+30 −10 lines)


kernel/time/clocksource.c

Lines changed: 30 additions & 10 deletions
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * We cannot directly run clocksource_watchdog_kthread() here, because
+	 * clocksource_select() calls timekeeping_notify() which uses
+	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
+	 * lock inversions wrt CPU hotplug.
+	 *
+	 * Also, we only ever run this work once or twice during the lifetime
+	 * of the kernel, so there is no point in creating a more permanent
+	 * kthread for this.
+	 *
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 	/*
-	 * If the clocksource is registered clocksource_watchdog_work() will
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
 	 * re-rate and re-select.
 	 */
 	if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
-	/* kick clocksource_watchdog_work() */
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
 	return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_work())
+	if (__clocksource_watchdog_kthread())
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
-	__clocksource_watchdog_work();
+	__clocksource_watchdog_kthread();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
