@@ -129,31 +129,19 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
-static int clocksource_watchdog_kthread(void *data);
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
-static void clocksource_watchdog_work(struct work_struct *work)
-{
-	/*
-	 * If kthread_run fails the next watchdog scan over the
-	 * watchdog_list will find the unstable clock again.
-	 */
-	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
-}
-
 static void __clocksource_unstable(struct clocksource *cs)
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 	/*
-	 * If the clocksource is registered clocksource_watchdog_kthread() will
+	 * If the clocksource is registered clocksource_watchdog_work() will
 	 * re-rate and re-select.
 	 */
 	if (list_empty(&cs->list)) {
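The clocksource_watchdog_work() deleted above was only a trampoline: the watchdog runs in atomic context, so it scheduled a work, and the work in turn spawned a throwaway kthread to do the actual demotion and re-selection. Its removed comment also encodes a retry argument: kthread_run() can fail, and the old code deliberately ignored that because the next watchdog scan over watchdog_list would find the still-unstable clock and try again. A minimal sketch of that now-removed spawn pattern, using hypothetical demo_* names rather than the real watchdog code:

#include <linux/kthread.h>
#include <linux/err.h>

/* Stand-in for the real re-rate/re-select logic. */
static int demo_watchdog_fn(void *data)
{
	return 0;
}

static void demo_spawn_watchdog(void)
{
	struct task_struct *t;

	/* kthread_run() returns ERR_PTR() on failure ... */
	t = kthread_run(demo_watchdog_fn, NULL, "kdemo");
	if (IS_ERR(t)) {
		/* ... which is ignored: the next scan simply retries. */
	}
}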
@@ -164,7 +152,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
-	/* kick clocksource_watchdog_kthread() */
+	/* kick clocksource_watchdog_work() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
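__clocksource_unstable() can be reached from the watchdog timer, i.e. from atomic context, so it only flips flags and defers the heavyweight demotion and re-selection via schedule_work(). A minimal sketch of this mark-then-defer pattern, assuming hypothetical demo_* names (the real watchdog_work item is declared elsewhere in clocksource.c):

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/printk.h>

static DEFINE_MUTEX(demo_mutex);
static bool demo_unstable;

/* Runs later in process context, where sleeping locks are fine. */
static void demo_work_fn(struct work_struct *work)
{
	mutex_lock(&demo_mutex);
	if (demo_unstable)
		pr_info("demo: re-rating unstable source\n");
	mutex_unlock(&demo_mutex);
}

static DECLARE_WORK(demo_work, demo_work_fn);

/* Safe from atomic context: only sets a flag and queues the work. */
static void demo_mark_unstable(void)
{
	demo_unstable = true;
	schedule_work(&demo_work);
}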
@@ -174,7 +162,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a kthread.
+ * it defers demotion and re-selection to a work.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -399,7 +387,9 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 }
 
-static int __clocksource_watchdog_kthread(void)
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
+static int __clocksource_watchdog_work(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -424,13 +414,12 @@ static int __clocksource_watchdog_kthread(void)
 	return select;
 }
 
-static int clocksource_watchdog_kthread(void *data)
+static void clocksource_watchdog_work(struct work_struct *work)
 {
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_kthread())
+	if (__clocksource_watchdog_work())
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
-	return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
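The rename here is not just cosmetic: a kthread entry point has the signature int (*)(void *) and must return an exit code, while a work handler is void (*)(struct work_struct *), which is why the return 0; disappears. It also explains why no work declaration needs touching: the renamed function simply takes over the old trampoline's name, so the existing work item (presumably a DECLARE_WORK(watchdog_work, clocksource_watchdog_work) elsewhere in the file, not shown in this diff) keeps pointing at the right place. A side-by-side sketch of the two signatures, hypothetical names:

#include <linux/kthread.h>
#include <linux/workqueue.h>

/* kthread entry point: takes an opaque pointer, returns an exit code. */
static int demo_thread_fn(void *data)
{
	return 0;
}

/* work handler: takes the work item itself, returns nothing. */
static void demo_work_fn(struct work_struct *work)
{
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_kick_both(void)
{
	kthread_run(demo_thread_fn, NULL, "kdemo");	/* old style */
	schedule_work(&demo_work);			/* new style */
}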
@@ -449,7 +438,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_kthread(void) { return 0; }
+static inline int __clocksource_watchdog_work(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
@@ -683,7 +672,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
-	__clocksource_watchdog_kthread();
+	__clocksource_watchdog_work();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
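This last hunk shows why the scan helper stays separate from the work handler: at boot, clocksource_done_booting() calls it synchronously (already in process context, under clocksource_mutex) before the final selection, so an unstable source can never win the initial pick; at run time the same helper is reached through the work. A boot-ordering sketch with hypothetical demo_* stubs, not the real initcall:

#include <linux/init.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int demo_scan(void) { return 0; }	/* stub: weed out unstable sources */
static void demo_select(void) { }		/* stub: pick the best remaining one */

static int __init demo_done_booting(void)
{
	mutex_lock(&demo_mutex);
	demo_scan();	/* direct call: process context, no deferral needed */
	demo_select();
	mutex_unlock(&demo_mutex);
	return 0;
}
fs_initcall(demo_done_booting);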