 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
-#include <linux/perf_event.h>
 #include <linux/kthread.h>
 
 static DEFINE_MUTEX(watchdog_proc_mutex);
@@ -80,50 +79,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
 static unsigned long soft_lockup_nmi_warn;
 
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
-			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
-	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
-	if (!strncmp(str, "panic", 5))
-		hardlockup_panic = 1;
-	else if (!strncmp(str, "nopanic", 7))
-		hardlockup_panic = 0;
-	else if (!strncmp(str, "0", 1))
-		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-	else if (!strncmp(str, "1", 1))
-		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
-	return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
 unsigned int __read_mostly softlockup_panic =
 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
@@ -244,30 +202,12 @@ void touch_all_softlockup_watchdogs(void)
 	wq_watchdog_touch(-1);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
-	/*
-	 * Using __raw here because some code paths have
-	 * preemption enabled.  If preemption is enabled
-	 * then interrupts should be enabled too, in which
-	 * case we shouldn't have to worry about the watchdog
-	 * going off.
-	 */
-	raw_cpu_write(watchdog_nmi_touch, true);
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
 void touch_softlockup_watchdog_sync(void)
 {
 	__this_cpu_write(softlockup_touch_sync, true);
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
 bool is_hardlockup(void)
 {
@@ -279,7 +219,6 @@ bool is_hardlockup(void)
 	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return false;
 }
-#endif
 
 static int is_softlockup(unsigned long touch_ts)
 {
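
As context, not part of the commit: is_hardlockup(), whose tail appears in the hunk above, detects a stuck CPU by checking whether the hrtimer interrupt counter advanced since the previous NMI-driven check. Below is a minimal, self-contained sketch of that counter-comparison idea, with the kernel's per-CPU accessors replaced by plain variables and a hypothetical _sketch name.

#include <stdbool.h>
#include <stdio.h>

/* Bumped by the (simulated) timer interrupt path. */
static unsigned long hrtimer_interrupts;
/* Value observed at the previous (simulated) NMI check. */
static unsigned long hrtimer_interrupts_saved;

/* Same shape as the kernel's is_hardlockup(), minus per-CPU accessors. */
static bool is_hardlockup_sketch(void)
{
	unsigned long hrint = hrtimer_interrupts;

	if (hrtimer_interrupts_saved == hrint)
		return true;	/* counter did not move: timer tick is stuck */

	hrtimer_interrupts_saved = hrint;
	return false;
}

int main(void)
{
	hrtimer_interrupts++;			/* a timer tick arrives */
	printf("%d\n", is_hardlockup_sketch());	/* 0: counter advanced */
	printf("%d\n", is_hardlockup_sketch());	/* 1: no tick since last check */
	return 0;
}
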
@@ -293,77 +232,22 @@ static int is_softlockup(unsigned long touch_ts)
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-
-static struct perf_event_attr wd_hw_attr = {
-	.type		= PERF_TYPE_HARDWARE,
-	.config		= PERF_COUNT_HW_CPU_CYCLES,
-	.size		= sizeof(struct perf_event_attr),
-	.pinned		= 1,
-	.disabled	= 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
-{
-	/* Ensure the watchdog never gets throttled */
-	event->hw.interrupts = 0;
-
-	if (__this_cpu_read(watchdog_nmi_touch) == true) {
-		__this_cpu_write(watchdog_nmi_touch, false);
-		return;
-	}
-
-	/* check for a hardlockup
-	 * This is done by making sure our timer interrupt
-	 * is incrementing.  The timer interrupt should have
-	 * fired multiple times before we overflow'd.  If it hasn't
-	 * then this is a good indication the cpu is stuck
-	 */
-	if (is_hardlockup()) {
-		int this_cpu = smp_processor_id();
-
-		/* only print hardlockups once */
-		if (__this_cpu_read(hard_watchdog_warn) == true)
-			return;
-
-		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-		print_modules();
-		print_irqtrace_events(current);
-		if (regs)
-			show_regs(regs);
-		else
-			dump_stack();
-
-		/*
-		 * Perform all-CPU dump only once to avoid multiple hardlockups
-		 * generating interleaving traces
-		 */
-		if (sysctl_hardlockup_all_cpu_backtrace &&
-				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
-			trigger_allbutself_cpu_backtrace();
-
-		if (hardlockup_panic)
-			nmi_panic(regs, "Hard LOCKUP");
-
-		__this_cpu_write(hard_watchdog_warn, true);
-		return;
-	}
-
-	__this_cpu_write(hard_watchdog_warn, false);
-	return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-
 static void watchdog_interrupt_count(void)
 {
 	__this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific
+ * defining them as weak here.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
 
 static int watchdog_enable_all_cpus(void);
 static void watchdog_disable_all_cpus(void);
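
The stubs added above rely on __weak, the kernel's shorthand for __attribute__((weak)): an architecture that ships a strong definition of watchdog_nmi_enable()/watchdog_nmi_disable() replaces these generic no-ops at link time. A minimal userspace sketch, not from this commit, of the same linkage behaviour:

#include <stdio.h>

/* Weak default, analogous to the generic stub in kernel/watchdog.c.
 * A strong definition of the same symbol elsewhere in the link wins. */
__attribute__((weak)) int watchdog_nmi_enable(unsigned int cpu)
{
	printf("generic weak stub: nothing to do for cpu %u\n", cpu);
	return 0;
}

/* An "arch" object could instead provide the strong override, e.g.:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		printf("arch override: arming NMI watchdog on cpu %u\n", cpu);
 *		return 0;
 *	}
 */

int main(void)
{
	return watchdog_nmi_enable(0);
}

Linking in a second object that defines watchdog_nmi_enable() without the weak attribute makes the linker pick that definition, which is exactly how an arch-specific NMI watchdog slots in under this scheme.
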
@@ -556,109 +440,6 @@ static void watchdog(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
-	struct perf_event_attr *wd_attr;
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	/* nothing to do if the hard lockup detector is disabled */
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto out;
-
-	/* is it already setup and enabled? */
-	if (event && event->state > PERF_EVENT_STATE_OFF)
-		goto out;
-
-	/* it is setup but not enabled */
-	if (event != NULL)
-		goto out_enable;
-
-	wd_attr = &wd_hw_attr;
-	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
-	/* Try to register using hardware perf events */
-	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
-	/* save cpu0 error for future comparision */
-	if (cpu == 0 && IS_ERR(event))
-		cpu0_err = PTR_ERR(event);
-
-	if (!IS_ERR(event)) {
-		/* only print for cpu0 or different than cpu0 */
-		if (cpu == 0 || cpu0_err)
-			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-		goto out_save;
-	}
-
-	/*
-	 * Disable the hard lockup detector if _any_ CPU fails to set up
-	 * set up the hardware perf event. The watchdog() function checks
-	 * the NMI_WATCHDOG_ENABLED bit periodically.
-	 *
-	 * The barriers are for syncing up watchdog_enabled across all the
-	 * cpus, as clear_bit() does not use barriers.
-	 */
-	smp_mb__before_atomic();
-	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-	smp_mb__after_atomic();
-
-	/* skip displaying the same error again */
-	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
-		return PTR_ERR(event);
-
-	/* vary the KERN level based on the returned errno */
-	if (PTR_ERR(event) == -EOPNOTSUPP)
-		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-	else if (PTR_ERR(event) == -ENOENT)
-		pr_warn("disabled (cpu%i): hardware events not enabled\n",
-			 cpu);
-	else
-		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-			cpu, PTR_ERR(event));
-
-	pr_info("Shutting down hard lockup detector on all cpus\n");
-
-	return PTR_ERR(event);
-
-	/* success path */
-out_save:
-	per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-	perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-	return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	if (event) {
-		perf_event_disable(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
-
-		/* should be in cleanup, but blocks oprofile */
-		perf_event_release_kernel(event);
-	}
-	if (cpu == 0) {
-		/* watchdog_nmi_enable() expects this to be zero initially. */
-		cpu0_err = 0;
-	}
-}
-
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif	/* CONFIG_HARDLOCKUP_DETECTOR */
-
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
 	.thread_should_run	= watchdog_should_run,