Linux Kernel 3.7.1
watchdog.c
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

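/*
 * For reference, the boot parameters handled above are typically used on the
 * kernel command line like this (values are illustrative):
 *
 *   nmi_watchdog=panic    panic when a hard lockup is detected
 *   nmi_watchdog=0        disable the watchdog entirely
 *   softlockup_panic=1    panic when a soft lockup is detected
 *   nowatchdog            disable both detectors
 *   nosoftlockup          deprecated; here it has the same effect as nowatchdog
 */
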
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static u64 get_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
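
/*
 * Worked example with the default watchdog_thresh of 10 seconds: the
 * soft-lockup threshold is 2 * 10 = 20s and the sample period is
 * 20s / 5 = 4s (returned in ns).  The hrtimer therefore fires every
 * 4 seconds, which gives hrtimer_interrupts several chances to advance
 * within the 10s window the hardlockup detector watches.
 */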

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
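
/*
 * Example: with a 20s soft-lockup threshold, a CPU whose timestamp was last
 * touched at t = 100s and which is now at t = 125s returns a duration of 25
 * (seconds stuck); a return value of 0 means the CPU is healthy.
 */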

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
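
/*
 * wd_hw_attr describes the NMI source: a pinned, initially disabled hardware
 * cycle counter.  Its sample_period is not set here; watchdog_nmi_enable()
 * below fills it in from watchdog_thresh via hw_nmi_get_sample_period(), so
 * the counter overflows (and raises an NMI) roughly once per hard-lockup
 * window.
 */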

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
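
/*
 * Note on the flow above: every sample period the hrtimer bumps
 * hrtimer_interrupts (which the NMI-driven hardlockup check watches), wakes
 * the per-cpu watchdog thread (which refreshes watchdog_touch_ts), and then
 * compares that timestamp itself.  A stale timestamp therefore means the
 * SCHED_FIFO watchdog thread could not get the CPU: a soft lockup.
 */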

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	if (!watchdog_enabled) {
		kthread_park(current);
		return;
	}

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	if (!watchdog_enabled)
		return;

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every get_sample_period() seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
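
/*
 * If perf counter creation fails above (no PMU, no LAPIC, ...), only the
 * NMI-based hardlockup detector is lost: watchdog_enable() ignores the return
 * value and still starts the hrtimer, so soft-lockup detection keeps working.
 */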

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	unsigned int cpu;

	if (watchdog_disabled) {
		watchdog_disabled = 0;
		for_each_online_cpu(cpu)
			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	}
}

static void watchdog_disable_all_cpus(void)
{
	unsigned int cpu;

	if (!watchdog_disabled) {
		watchdog_disabled = 1;
		for_each_online_cpu(cpu)
			kthread_park(per_cpu(softlockup_watchdog, cpu));
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	if (watchdog_disabled < 0)
		return -ENODEV;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return ret;
}
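
/*
 * Typical usage of the proc handler above (illustrative shell commands):
 *
 *   echo 0  > /proc/sys/kernel/nmi_watchdog     # disable: parks the threads
 *   echo 1  > /proc/sys/kernel/nmi_watchdog     # re-enable: unparks them
 *   echo 20 > /proc/sys/kernel/watchdog_thresh  # raise the threshold (seconds)
 *
 * After the write, all CPUs are enabled or disabled depending on whether both
 * watchdog_enabled and watchdog_thresh are non-zero.
 */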
#endif /* CONFIG_SYSCTL */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
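
/*
 * The smpboot core drives the life cycle described by this struct: it spawns
 * one "watchdog/N" kthread per CPU, calls .setup/.unpark (watchdog_enable)
 * when a CPU comes up or a thread is unparked, calls .park (watchdog_disable)
 * when it is parked or the CPU goes down, and only invokes .thread_fn
 * (watchdog) when .thread_should_run returns true, i.e. when the hrtimer has
 * ticked since the last thread run.
 */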

void __init lockup_detector_init(void)
{
	if (smpboot_register_percpu_thread(&watchdog_threads)) {
		pr_err("Failed to create watchdog threads, disabled\n");
		watchdog_disabled = -ENODEV;
	}
}