/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 */
void __weak watchdog_nmi_reconfigure(void)
{
}


#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

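/*
 * Return how long ago (in seconds) the softlockup timestamp was last
 * touched if that delay exceeds the softlockup threshold, or 0 if the
 * CPU is still considered healthy (or the soft watchdog is disabled).
 */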
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
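/*
 * Called from the hardlockup detector (typically the perf NMI handler or
 * an arch-specific equivalent). If hrtimer_interrupts has not advanced
 * since the previous check, the per-CPU hrtimer interrupt is no longer
 * firing and the CPU is assumed to be hard locked up.
 */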
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
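/*
 * Per-CPU hrtimer callback. It fires every sample_period, increments
 * hrtimer_interrupts so the hardlockup detector can see that timer
 * interrupts are still being delivered, wakes the per-CPU watchdog
 * thread, and reports a soft lockup if that thread has not touched the
 * timestamp within the softlockup threshold.
 */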
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

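/*
 * watchdog_enable/disable are the setup/unpark and park callbacks of the
 * smpboot watchdog threads and therefore always run on the CPU they manage:
 * they start/stop the per-CPU hrtimer and the NMI watchdog, and switch the
 * thread's scheduling policy between SCHED_FIFO and SCHED_NORMAL.
 */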
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents that a large delay
	 * between disabling the timer and disabling the perf event causes
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

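/*
 * The smpboot thread only needs to run when the hrtimer interrupt has
 * fired since the last time the thread updated soft_lockup_hrtimer_cnt;
 * otherwise it stays asleep.
 */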
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}
	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

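/*
 * Park and immediately unpark all watchdog threads so that each of them
 * re-runs watchdog_enable() and picks up the current sample period and
 * threshold.
 */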
static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

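/*
 * Register the per-CPU watchdog threads on first use; on later calls the
 * already-running threads are parked/unparked so that new settings take
 * effect. Clears watchdog_enabled on failure.
 */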
static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		&watchdog_threads, &watchdog_cpumask);
}
#endif

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
	return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
	return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return 0;
}
#endif

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	__lockup_detector_cleanup();

	return err;

}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
		__lockup_detector_cleanup();
	}

	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

#endif /* CONFIG_SYSCTL */

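/*
 * Called once during boot: pick the sample period, restrict the default
 * watchdog cpumask to the housekeeping CPUs on nohz_full systems, and
 * start the watchdog threads unless the detector has been disabled on the
 * command line.
 */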
void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}