/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);

static bool cpu_smt_available __read_mostly;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be made after the full
 * CPU identification. Called from architecture code before non-boot CPUs
 * are brought up.
 */
void __init cpu_smt_check_topology_early(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

/*
 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
 * brought online. This ensures the smt/l1tf sysfs entries are consistent
 * with reality. cpu_smt_available is set to true during the bringup of
 * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
 * cpu_smt_control's previous setting.
 */
void __init cpu_smt_check_topology(void)
{
	if (!cpu_smt_available)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * If the CPU is not a 'primary' thread and the booted_once bit is
	 * set then the processor has SMT support. Store this information
	 * for the late check of SMT support in cpu_smt_check_topology().
	 */
	if (per_cpu(cpuhp_state, cpu).booted_once)
		cpu_smt_available = true;

	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

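/*
 * Set a new target state for the hotplug state machine and note whether the
 * operation is a bringup or a teardown. Returns the previous state so that a
 * failed transition can be rolled back via cpuhp_reset_state().
 */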
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

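/*
 * Undo the direction of a failed transition: the previous state becomes the
 * new target and st->bringup is inverted so the state machine walks back to
 * where it started.
 */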
static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

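/*
 * Drive the AP hotplug thread towards @target and, if the thread reports an
 * error, kick it again in the opposite direction to roll back.
 */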
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

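/*
 * Wait until the newly booted CPU reaches CPUHP_AP_ONLINE_IDLE, unpark its
 * stopper and hotplug threads and let the AP hotplug thread complete the
 * bringup to the requested target state.
 */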
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires bringing the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

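/*
 * Boot a CPU via the architecture specific __cpu_up() and hand control over
 * to the AP once it is running.
 */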
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

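/* Walk the state machine back down after a failed bringup step. */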
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

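/*
 * Invoke the startup callbacks from the current state up to @target. On
 * failure the steps executed so far are undone via undo_cpu_up().
 */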
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
Peter Zijlstra4dddfb52017-09-20 19:00:17 +0200726 return ret;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000727}
728
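/*
 * Kick the AP hotplug thread of @cpu towards st->target on behalf of the
 * controlling CPU and wait for the result.
 */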
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

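/*
 * Take the CPU fully offline: park its hotplug thread, run take_cpu_down()
 * via stop_machine(), wait for the idle task to reach CPUHP_AP_IDLE_DEAD and
 * finally let the architecture reap the dead CPU.
 */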
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

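/* Walk the state machine back up after a failed teardown step. */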
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

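/*
 * Invoke the teardown callbacks from the current state down to @target. On
 * failure the steps executed so far are undone via undo_cpu_down().
 */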
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}

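/*
 * Teardown entry point for callers which already hold cpu_add_remove_lock;
 * fails with -EBUSY when hotplug is administratively disabled.
 */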
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

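/*
 * Validate the request, online the CPU's memory node if necessary and bring
 * the CPU up to @target while holding cpu_add_remove_lock.
 */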
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

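/*
 * Take all CPUs except @primary down for suspend/hibernation and record them
 * in frozen_cpus so enable_nonboot_cpus() can bring them back up later.
 */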
int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001214void __weak arch_enable_nonboot_cpus_begin(void)
1215{
1216}
1217
1218void __weak arch_enable_nonboot_cpus_end(void)
1219{
1220}
1221
Mathias Krause71cf5ae2015-07-19 20:06:22 +02001222void enable_nonboot_cpus(void)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001223{
1224 int cpu, error;
1225
1226 /* Allow everyone to use the CPU hotplug again */
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001227 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -07001228 __cpu_hotplug_enable();
Rusty Russelle0b582e2009-01-01 10:12:28 +10301229 if (cpumask_empty(frozen_cpus))
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001230 goto out;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001231
Fabian Frederick84117da2014-06-04 16:11:17 -07001232 pr_info("Enabling non-boot CPUs ...\n");
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001233
1234 arch_enable_nonboot_cpus_begin();
1235
Rusty Russelle0b582e2009-01-01 10:12:28 +10301236 for_each_cpu(cpu, frozen_cpus) {
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001237 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001238 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001239 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001240 if (!error) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001241 pr_info("CPU%d is up\n", cpu);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001242 continue;
1243 }
Fabian Frederick84117da2014-06-04 16:11:17 -07001244 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001245 }
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001246
1247 arch_enable_nonboot_cpus_end();
1248
Rusty Russelle0b582e2009-01-01 10:12:28 +10301249 cpumask_clear(frozen_cpus);
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001250out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001251 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001252}
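For context, a minimal sketch of how a suspend-style caller is expected to pair these helpers, following the contract noted above (enable_nonboot_cpus() is called even when the freeze fails); the transition work itself is elided and do_the_transition() is a hypothetical placeholder:

	int err = disable_nonboot_cpus();

	if (!err) {
		/* Only the boot CPU is online here; do the transition work. */
		do_the_transition();		/* hypothetical */
	}
	/* Re-enable in all cases, as required of disable_nonboot_cpus() users. */
	enable_nonboot_cpus();
	return err;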
Rusty Russelle0b582e2009-01-01 10:12:28 +10301253
Fenghua Yud7268a32011-11-15 21:59:31 +01001254static int __init alloc_frozen_cpus(void)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301255{
1256 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1257 return -ENOMEM;
1258 return 0;
1259}
1260core_initcall(alloc_frozen_cpus);
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001261
1262/*
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001263 * When callbacks for CPU hotplug notifications are being executed, we must
 1264	 * ensure that the system's state with respect to whether tasks are frozen,
 1265	 * as reported by the notification, remains unchanged *throughout the
1266 * duration* of the execution of the callbacks.
1267 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1268 *
1269 * This synchronization is implemented by mutually excluding regular CPU
1270 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1271 * Hibernate notifications.
1272 */
1273static int
1274cpu_hotplug_pm_callback(struct notifier_block *nb,
1275 unsigned long action, void *ptr)
1276{
1277 switch (action) {
1278
1279 case PM_SUSPEND_PREPARE:
1280 case PM_HIBERNATION_PREPARE:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001281 cpu_hotplug_disable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001282 break;
1283
1284 case PM_POST_SUSPEND:
1285 case PM_POST_HIBERNATION:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001286 cpu_hotplug_enable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001287 break;
1288
1289 default:
1290 return NOTIFY_DONE;
1291 }
1292
1293 return NOTIFY_OK;
1294}
1295
1296
Fenghua Yud7268a32011-11-15 21:59:31 +01001297static int __init cpu_hotplug_pm_sync_init(void)
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001298{
Fenghua Yu6e32d472012-11-13 11:32:43 -08001299 /*
 1300	 * cpu_hotplug_pm_callback has higher priority than the x86
 1301	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
 1302	 * having disabled cpu hotplug, to avoid a cpu hotplug race.
1303 */
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001304 pm_notifier(cpu_hotplug_pm_callback, 0);
1305 return 0;
1306}
1307core_initcall(cpu_hotplug_pm_sync_init);
1308
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001309#endif /* CONFIG_PM_SLEEP_SMP */
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001310
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01001311int __boot_cpu_id;
1312
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001313#endif /* CONFIG_SMP */
Mike Travisb8d317d2008-07-24 18:21:29 -07001314
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001315/* Boot processor state steps */
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001316static struct cpuhp_step cpuhp_hp_states[] = {
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001317 [CPUHP_OFFLINE] = {
1318 .name = "offline",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001319 .startup.single = NULL,
1320 .teardown.single = NULL,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001321 },
1322#ifdef CONFIG_SMP
 1323	[CPUHP_CREATE_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001324 .name = "threads:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001325 .startup.single = smpboot_create_threads,
1326 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001327 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001328 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001329 [CPUHP_PERF_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001330 .name = "perf:prepare",
1331 .startup.single = perf_event_init_cpu,
1332 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001333 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001334 [CPUHP_WORKQUEUE_PREP] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001335 .name = "workqueue:prepare",
1336 .startup.single = workqueue_prepare_cpu,
1337 .teardown.single = NULL,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001338 },
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001339 [CPUHP_HRTIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001340 .name = "hrtimers:prepare",
1341 .startup.single = hrtimers_prepare_cpu,
1342 .teardown.single = hrtimers_dead_cpu,
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001343 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001344 [CPUHP_SMPCFD_PREPARE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001345 .name = "smpcfd:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001346 .startup.single = smpcfd_prepare_cpu,
1347 .teardown.single = smpcfd_dead_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001348 },
Richard Weinbergere6d49892016-08-18 14:57:17 +02001349 [CPUHP_RELAY_PREPARE] = {
1350 .name = "relay:prepare",
1351 .startup.single = relay_prepare_cpu,
1352 .teardown.single = NULL,
1353 },
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +02001354 [CPUHP_SLAB_PREPARE] = {
1355 .name = "slab:prepare",
1356 .startup.single = slab_prepare_cpu,
1357 .teardown.single = slab_dead_cpu,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001358 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001359 [CPUHP_RCUTREE_PREP] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001360 .name = "RCU/tree:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001361 .startup.single = rcutree_prepare_cpu,
1362 .teardown.single = rcutree_dead_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001363 },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001364 /*
Richard Cochran4fae16d2016-07-27 11:08:18 +02001365 * On the tear-down path, timers_dead_cpu() must be invoked
1366 * before blk_mq_queue_reinit_notify() from notify_dead(),
 1367	 * otherwise an RCU stall occurs.
1368 */
Thomas Gleixner26456f82017-12-27 21:37:25 +01001369 [CPUHP_TIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001370 .name = "timers:dead",
Thomas Gleixner26456f82017-12-27 21:37:25 +01001371 .startup.single = timers_prepare_cpu,
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001372 .teardown.single = timers_dead_cpu,
Richard Cochran4fae16d2016-07-27 11:08:18 +02001373 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001374 /* Kicks the plugged cpu into life */
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001375 [CPUHP_BRINGUP_CPU] = {
1376 .name = "cpu:bringup",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001377 .startup.single = bringup_cpu,
1378 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001379 .cant_stop = true,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001380 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001381 /* Final state before CPU kills itself */
1382 [CPUHP_AP_IDLE_DEAD] = {
1383 .name = "idle:dead",
1384 },
1385 /*
1386 * Last state before CPU enters the idle loop to die. Transient state
1387 * for synchronization.
1388 */
1389 [CPUHP_AP_OFFLINE] = {
1390 .name = "ap:offline",
1391 .cant_stop = true,
1392 },
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001393 /* First state is scheduler control. Interrupts are disabled */
1394 [CPUHP_AP_SCHED_STARTING] = {
1395 .name = "sched:starting",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001396 .startup.single = sched_cpu_starting,
1397 .teardown.single = sched_cpu_dying,
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001398 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001399 [CPUHP_AP_RCUTREE_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001400 .name = "RCU/tree:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001401 .startup.single = NULL,
1402 .teardown.single = rcutree_dying_cpu,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001403 },
Lai Jiangshan46febd32017-11-28 21:19:53 +08001404 [CPUHP_AP_SMPCFD_DYING] = {
1405 .name = "smpcfd:dying",
1406 .startup.single = NULL,
1407 .teardown.single = smpcfd_dying_cpu,
1408 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001409 /* Entry state on starting. Interrupts enabled from here on. Transient
 1410	 * state for synchronization. */
1411 [CPUHP_AP_ONLINE] = {
1412 .name = "ap:online",
1413 },
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001414 /*
 1415	 * Handled on the control processor until the plugged processor manages
1416 * this itself.
1417 */
1418 [CPUHP_TEARDOWN_CPU] = {
1419 .name = "cpu:teardown",
1420 .startup.single = NULL,
1421 .teardown.single = takedown_cpu,
1422 .cant_stop = true,
1423 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001424 /* Handle smpboot threads park/unpark */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001425 [CPUHP_AP_SMPBOOT_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001426 .name = "smpboot/threads:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001427 .startup.single = smpboot_unpark_threads,
Thomas Gleixnerc4de6562018-05-29 19:05:25 +02001428 .teardown.single = smpboot_park_threads,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001429 },
Thomas Gleixnerc5cb83b2017-06-20 01:37:51 +02001430 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1431 .name = "irq/affinity:online",
1432 .startup.single = irq_affinity_online_cpu,
1433 .teardown.single = NULL,
1434 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001435 [CPUHP_AP_PERF_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001436 .name = "perf:online",
1437 .startup.single = perf_event_init_cpu,
1438 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001439 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001440 [CPUHP_AP_WORKQUEUE_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001441 .name = "workqueue:online",
1442 .startup.single = workqueue_online_cpu,
1443 .teardown.single = workqueue_offline_cpu,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001444 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001445 [CPUHP_AP_RCUTREE_ONLINE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001446 .name = "RCU/tree:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001447 .startup.single = rcutree_online_cpu,
1448 .teardown.single = rcutree_offline_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001449 },
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001450#endif
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001451 /*
1452 * The dynamically registered state space is here
1453 */
1454
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001455#ifdef CONFIG_SMP
1456 /* Last state is scheduler control setting the cpu active */
1457 [CPUHP_AP_ACTIVE] = {
1458 .name = "sched:active",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001459 .startup.single = sched_cpu_activate,
1460 .teardown.single = sched_cpu_deactivate,
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001461 },
1462#endif
1463
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001464 /* CPU is fully up and running. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001465 [CPUHP_ONLINE] = {
1466 .name = "online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001467 .startup.single = NULL,
1468 .teardown.single = NULL,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001469 },
1470};
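For illustration only, a hypothetical multi-instance entry in the table above might look like the sketch below; the state constant, the name and the foo_* callbacks are invented, not part of the kernel:

	[CPUHP_AP_FOO_ONLINE] = {		/* hypothetical state constant */
		.name			= "foo:online",
		.startup.multi		= foo_cpu_online,	/* int (*)(unsigned int, struct hlist_node *) */
		.teardown.multi		= foo_cpu_offline,
		.multi_instance		= true,
	},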
1471
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001472/* Sanity check for callbacks */
1473static int cpuhp_cb_check(enum cpuhp_state state)
1474{
1475 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1476 return -EINVAL;
1477 return 0;
1478}
1479
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001480/*
 1481 * Returns a free slot for dynamic state assignment. The slots are protected
 1482 * by the cpuhp_state_mutex and an empty slot is identified
1483 * by having no name assigned.
1484 */
1485static int cpuhp_reserve_state(enum cpuhp_state state)
1486{
Thomas Gleixner4205e472017-01-10 14:01:05 +01001487 enum cpuhp_state i, end;
1488 struct cpuhp_step *step;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001489
Thomas Gleixner4205e472017-01-10 14:01:05 +01001490 switch (state) {
1491 case CPUHP_AP_ONLINE_DYN:
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001492 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
Thomas Gleixner4205e472017-01-10 14:01:05 +01001493 end = CPUHP_AP_ONLINE_DYN_END;
1494 break;
1495 case CPUHP_BP_PREPARE_DYN:
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001496 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
Thomas Gleixner4205e472017-01-10 14:01:05 +01001497 end = CPUHP_BP_PREPARE_DYN_END;
1498 break;
1499 default:
1500 return -EINVAL;
1501 }
1502
1503 for (i = state; i <= end; i++, step++) {
1504 if (!step->name)
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001505 return i;
1506 }
1507 WARN(1, "No more dynamic states available for CPU hotplug\n");
1508 return -ENOSPC;
1509}
1510
1511static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1512 int (*startup)(unsigned int cpu),
1513 int (*teardown)(unsigned int cpu),
1514 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001515{
1516 /* (Un)Install the callbacks for further cpu hotplug operations */
1517 struct cpuhp_step *sp;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001518 int ret = 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001519
Ethan Barnes0c96b272017-07-19 22:36:00 +00001520 /*
1521 * If name is NULL, then the state gets removed.
1522 *
1523 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1524 * the first allocation from these dynamic ranges, so the removal
1525 * would trigger a new allocation and clear the wrong (already
1526 * empty) state, leaving the callbacks of the to be cleared state
1527 * dangling, which causes wreckage on the next hotplug operation.
1528 */
1529 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1530 state == CPUHP_BP_PREPARE_DYN)) {
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001531 ret = cpuhp_reserve_state(state);
1532 if (ret < 0)
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001533 return ret;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001534 state = ret;
1535 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001536 sp = cpuhp_get_step(state);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001537 if (name && sp->name)
1538 return -EBUSY;
1539
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001540 sp->startup.single = startup;
1541 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001542 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001543 sp->multi_instance = multi_instance;
1544 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001545 return ret;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001546}
1547
1548static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1549{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001550 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001551}
1552
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001553/*
1554 * Call the startup/teardown function for a step either on the AP or
1555 * on the current CPU.
1556 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001557static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1558 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001559{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001560 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001561 int ret;
1562
Peter Zijlstra4dddfb52017-09-20 19:00:17 +02001563 /*
 1564	 * If there's nothing to do, we're done.
1565 * Relies on the union for multi_instance.
1566 */
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001567 if ((bringup && !sp->startup.single) ||
1568 (!bringup && !sp->teardown.single))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001569 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001570 /*
 1571	 * The non-AP-bound callbacks can fail on bringup. On teardown,
 1572	 * e.g. module removal, we crash for now.
1573 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001574#ifdef CONFIG_SMP
1575 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001576 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001577 else
Peter Zijlstra96abb962017-09-20 19:00:16 +02001578 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001579#else
Peter Zijlstra96abb962017-09-20 19:00:16 +02001580 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001581#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001582 BUG_ON(ret && !bringup);
1583 return ret;
1584}
1585
1586/*
1587 * Called from __cpuhp_setup_state on a recoverable failure.
1588 *
1589 * Note: The teardown callbacks for rollback are not allowed to fail!
1590 */
1591static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001592 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001593{
1594 int cpu;
1595
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001596 /* Roll back the already executed steps on the other cpus */
1597 for_each_present_cpu(cpu) {
1598 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1599 int cpustate = st->state;
1600
1601 if (cpu >= failedcpu)
1602 break;
1603
1604 /* Did we invoke the startup call on that cpu ? */
1605 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001606 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001607 }
1608}
1609
Thomas Gleixner9805c672017-05-24 10:15:15 +02001610int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1611 struct hlist_node *node,
1612 bool invoke)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001613{
1614 struct cpuhp_step *sp;
1615 int cpu;
1616 int ret;
1617
Thomas Gleixner9805c672017-05-24 10:15:15 +02001618 lockdep_assert_cpus_held();
1619
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001620 sp = cpuhp_get_step(state);
1621 if (sp->multi_instance == false)
1622 return -EINVAL;
1623
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001624 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001625
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001626 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001627 goto add_node;
1628
1629 /*
1630 * Try to call the startup callback for each present cpu
1631 * depending on the hotplug state of the cpu.
1632 */
1633 for_each_present_cpu(cpu) {
1634 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1635 int cpustate = st->state;
1636
1637 if (cpustate < state)
1638 continue;
1639
1640 ret = cpuhp_issue_call(cpu, state, true, node);
1641 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001642 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001643 cpuhp_rollback_install(cpu, state, node);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001644 goto unlock;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001645 }
1646 }
1647add_node:
1648 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001649 hlist_add_head(node, &sp->list);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001650unlock:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001651 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner9805c672017-05-24 10:15:15 +02001652 return ret;
1653}
1654
1655int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1656 bool invoke)
1657{
1658 int ret;
1659
1660 cpus_read_lock();
1661 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001662 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001663 return ret;
1664}
1665EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
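A hedged sketch of the typical driver-side use of the multi-instance interface via the cpuhp_setup_state_multi() and cpuhp_state_add_instance() wrappers from <linux/cpuhotplug.h>; the foo_* names, the per-device structure and the foo_setup_on_cpu()/foo_teardown_on_cpu() helpers are assumptions for illustration, not kernel APIs:

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct foo_device {
	struct hlist_node node;		/* linked into the state's instance list */
	/* ... per-device data ... */
};

static enum cpuhp_state foo_hp_online;

static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_device *foo = hlist_entry(node, struct foo_device, node);

	return foo_setup_on_cpu(foo, cpu);	/* hypothetical per-CPU setup */
}

static int foo_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct foo_device *foo = hlist_entry(node, struct foo_device, node);

	foo_teardown_on_cpu(foo, cpu);		/* hypothetical per-CPU teardown */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/* Reserve a dynamic online state once for the whole driver. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
				      foo_cpu_online, foo_cpu_offline);
	if (ret < 0)
		return ret;
	foo_hp_online = ret;
	return 0;
}

/* Called once per device; runs foo_cpu_online() on all CPUs at or past the state. */
static int foo_register_device(struct foo_device *foo)
{
	return cpuhp_state_add_instance(foo_hp_online, &foo->node);
}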
1666
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001667/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001668 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001669 * @state: The state to setup
 * @name: Name of the state
1670 * @invoke: If true, the startup function is invoked for cpus where
1671 * cpu state >= @state
1672 * @startup: startup callback function
1673 * @teardown: teardown callback function
1674 * @multi_instance: State is set up for multiple instances which get
1675 * added afterwards.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001676 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001677 * The caller needs to hold cpus read locked while calling this function.
Boris Ostrovsky512f0982016-12-15 10:00:57 -05001678 * Returns:
1679 * On success:
1680 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1681 * 0 for all other states
1682 * On failure: proper (negative) error code
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001683 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001684int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1685 const char *name, bool invoke,
1686 int (*startup)(unsigned int cpu),
1687 int (*teardown)(unsigned int cpu),
1688 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001689{
1690 int cpu, ret = 0;
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001691 bool dynstate;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001692
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001693 lockdep_assert_cpus_held();
1694
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001695 if (cpuhp_cb_check(state) || !name)
1696 return -EINVAL;
1697
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001698 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001699
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001700 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1701 multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001702
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001703 dynstate = state == CPUHP_AP_ONLINE_DYN;
1704 if (ret > 0 && dynstate) {
1705 state = ret;
1706 ret = 0;
1707 }
1708
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001709 if (ret || !invoke || !startup)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001710 goto out;
1711
1712 /*
1713 * Try to call the startup callback for each present cpu
1714 * depending on the hotplug state of the cpu.
1715 */
1716 for_each_present_cpu(cpu) {
1717 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1718 int cpustate = st->state;
1719
1720 if (cpustate < state)
1721 continue;
1722
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001723 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001724 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001725 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001726 cpuhp_rollback_install(cpu, state, NULL);
1727 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001728 goto out;
1729 }
1730 }
1731out:
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001732 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001733 /*
1734 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1735 * dynamically allocated state in case of success.
1736 */
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001737 if (!ret && dynstate)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001738 return state;
1739 return ret;
1740}
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001741EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1742
1743int __cpuhp_setup_state(enum cpuhp_state state,
1744 const char *name, bool invoke,
1745 int (*startup)(unsigned int cpu),
1746 int (*teardown)(unsigned int cpu),
1747 bool multi_instance)
1748{
1749 int ret;
1750
1751 cpus_read_lock();
1752 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1753 teardown, multi_instance);
1754 cpus_read_unlock();
1755 return ret;
1756}
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001757EXPORT_SYMBOL(__cpuhp_setup_state);
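And a minimal single-instance sketch using the cpuhp_setup_state()/cpuhp_remove_state() wrappers from <linux/cpuhotplug.h> with a dynamically allocated state; the bar_* names are illustrative assumptions:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state bar_hp_online;

static int bar_cpu_online(unsigned int cpu)
{
	/* allocate/enable whatever "bar" needs on this CPU */
	return 0;
}

static int bar_cpu_offline(unsigned int cpu)
{
	/* tear down the per-CPU resources again */
	return 0;
}

static int __init bar_init(void)
{
	int ret;

	/* Also invokes bar_cpu_online() for CPUs that are already up. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bar:online",
				bar_cpu_online, bar_cpu_offline);
	if (ret < 0)
		return ret;
	bar_hp_online = ret;
	return 0;
}

static void __exit bar_exit(void)
{
	/* Invokes bar_cpu_offline() on the online CPUs, then frees the slot. */
	cpuhp_remove_state(bar_hp_online);
}

module_init(bar_init);
module_exit(bar_exit);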
1758
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001759int __cpuhp_state_remove_instance(enum cpuhp_state state,
1760 struct hlist_node *node, bool invoke)
1761{
1762 struct cpuhp_step *sp = cpuhp_get_step(state);
1763 int cpu;
1764
1765 BUG_ON(cpuhp_cb_check(state));
1766
1767 if (!sp->multi_instance)
1768 return -EINVAL;
1769
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001770 cpus_read_lock();
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001771 mutex_lock(&cpuhp_state_mutex);
1772
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001773 if (!invoke || !cpuhp_get_teardown_cb(state))
1774 goto remove;
1775 /*
1776 * Call the teardown callback for each present cpu depending
1777 * on the hotplug state of the cpu. This function is not
1778 * allowed to fail currently!
1779 */
1780 for_each_present_cpu(cpu) {
1781 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1782 int cpustate = st->state;
1783
1784 if (cpustate >= state)
1785 cpuhp_issue_call(cpu, state, false, node);
1786 }
1787
1788remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001789 hlist_del(node);
1790 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001791 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001792
1793 return 0;
1794}
1795EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001796
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001797/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001798 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001799 * @state: The state to remove
1800 * @invoke: If true, the teardown function is invoked for cpus where
1801 * cpu state >= @state
1802 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001803 * The caller needs to hold cpus read locked while calling this function.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001804 * The teardown callback is currently not allowed to fail. Think
1805 * about module removal!
1806 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001807void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001808{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001809 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001810 int cpu;
1811
1812 BUG_ON(cpuhp_cb_check(state));
1813
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001814 lockdep_assert_cpus_held();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001815
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001816 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001817 if (sp->multi_instance) {
1818 WARN(!hlist_empty(&sp->list),
1819 "Error: Removing state %d which has instances left.\n",
1820 state);
1821 goto remove;
1822 }
1823
Thomas Gleixnera7246322016-08-12 19:49:38 +02001824 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001825 goto remove;
1826
1827 /*
1828 * Call the teardown callback for each present cpu depending
1829 * on the hotplug state of the cpu. This function is not
1830 * allowed to fail currently!
1831 */
1832 for_each_present_cpu(cpu) {
1833 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1834 int cpustate = st->state;
1835
1836 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001837 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001838 }
1839remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001840 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001841 mutex_unlock(&cpuhp_state_mutex);
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001842}
1843EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1844
1845void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1846{
1847 cpus_read_lock();
1848 __cpuhp_remove_state_cpuslocked(state, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001849 cpus_read_unlock();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001850}
1851EXPORT_SYMBOL(__cpuhp_remove_state);
1852
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001853#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1854static ssize_t show_cpuhp_state(struct device *dev,
1855 struct device_attribute *attr, char *buf)
1856{
1857 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1858
1859 return sprintf(buf, "%d\n", st->state);
1860}
1861static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1862
Thomas Gleixner757c9892016-02-26 18:43:32 +00001863static ssize_t write_cpuhp_target(struct device *dev,
1864 struct device_attribute *attr,
1865 const char *buf, size_t count)
1866{
1867 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1868 struct cpuhp_step *sp;
1869 int target, ret;
1870
1871 ret = kstrtoint(buf, 10, &target);
1872 if (ret)
1873 return ret;
1874
1875#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1876 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1877 return -EINVAL;
1878#else
1879 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1880 return -EINVAL;
1881#endif
1882
1883 ret = lock_device_hotplug_sysfs();
1884 if (ret)
1885 return ret;
1886
1887 mutex_lock(&cpuhp_state_mutex);
1888 sp = cpuhp_get_step(target);
1889 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1890 mutex_unlock(&cpuhp_state_mutex);
1891 if (ret)
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001892 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001893
1894 if (st->state < target)
1895 ret = do_cpu_up(dev->id, target);
1896 else
1897 ret = do_cpu_down(dev->id, target);
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001898out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00001899 unlock_device_hotplug();
1900 return ret ? ret : count;
1901}
1902
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001903static ssize_t show_cpuhp_target(struct device *dev,
1904 struct device_attribute *attr, char *buf)
1905{
1906 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1907
1908 return sprintf(buf, "%d\n", st->target);
1909}
Thomas Gleixner757c9892016-02-26 18:43:32 +00001910static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
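/*
 * "state" and "target" are exposed as
 * /sys/devices/system/cpu/cpuN/hotplug/{state,target}. Writing a state number
 * to "target" brings the CPU up or down to that state; unless
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL is set, only CPUHP_OFFLINE and CPUHP_ONLINE
 * are accepted.
 */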
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001911
Peter Zijlstra1db49482017-09-20 19:00:21 +02001912
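/*
 * "fail" is a debugging aid: writing a state number to
 * /sys/devices/system/cpu/cpuN/hotplug/fail makes the callback of that state
 * report an error on the next hotplug operation of this CPU, so the rollback
 * paths can be exercised from user space.
 */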
1913static ssize_t write_cpuhp_fail(struct device *dev,
1914 struct device_attribute *attr,
1915 const char *buf, size_t count)
1916{
1917 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1918 struct cpuhp_step *sp;
1919 int fail, ret;
1920
1921 ret = kstrtoint(buf, 10, &fail);
1922 if (ret)
1923 return ret;
1924
1925 /*
1926 * Cannot fail STARTING/DYING callbacks.
1927 */
1928 if (cpuhp_is_atomic_state(fail))
1929 return -EINVAL;
1930
1931 /*
1932 * Cannot fail anything that doesn't have callbacks.
1933 */
1934 mutex_lock(&cpuhp_state_mutex);
1935 sp = cpuhp_get_step(fail);
1936 if (!sp->startup.single && !sp->teardown.single)
1937 ret = -EINVAL;
1938 mutex_unlock(&cpuhp_state_mutex);
1939 if (ret)
1940 return ret;
1941
1942 st->fail = fail;
1943
1944 return count;
1945}
1946
1947static ssize_t show_cpuhp_fail(struct device *dev,
1948 struct device_attribute *attr, char *buf)
1949{
1950 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1951
1952 return sprintf(buf, "%d\n", st->fail);
1953}
1954
1955static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
1956
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001957static struct attribute *cpuhp_cpu_attrs[] = {
1958 &dev_attr_state.attr,
1959 &dev_attr_target.attr,
Peter Zijlstra1db49482017-09-20 19:00:21 +02001960 &dev_attr_fail.attr,
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001961 NULL
1962};
1963
Arvind Yadav993647a2017-06-29 17:40:47 +05301964static const struct attribute_group cpuhp_cpu_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001965 .attrs = cpuhp_cpu_attrs,
1966 .name = "hotplug",
1967 NULL
1968};
1969
1970static ssize_t show_cpuhp_states(struct device *dev,
1971 struct device_attribute *attr, char *buf)
1972{
1973 ssize_t cur, res = 0;
1974 int i;
1975
1976 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00001977 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001978 struct cpuhp_step *sp = cpuhp_get_step(i);
1979
1980 if (sp->name) {
1981 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1982 buf += cur;
1983 res += cur;
1984 }
1985 }
1986 mutex_unlock(&cpuhp_state_mutex);
1987 return res;
1988}
1989static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
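/*
 * Exposed as /sys/devices/system/cpu/hotplug/states; reading it lists the
 * number and name of every currently named state.
 */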
1990
1991static struct attribute *cpuhp_cpu_root_attrs[] = {
1992 &dev_attr_states.attr,
1993 NULL
1994};
1995
Arvind Yadav993647a2017-06-29 17:40:47 +05301996static const struct attribute_group cpuhp_cpu_root_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001997 .attrs = cpuhp_cpu_root_attrs,
1998 .name = "hotplug",
1999 NULL
2000};
2001
Thomas Gleixner05736e42018-05-29 17:48:27 +02002002#ifdef CONFIG_HOTPLUG_SMT
2003
2004static const char *smt_states[] = {
2005 [CPU_SMT_ENABLED] = "on",
2006 [CPU_SMT_DISABLED] = "off",
2007 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2008 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2009};
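/*
 * These strings are exposed via /sys/devices/system/cpu/smt/control. Writing
 * "on", "off" or "forceoff" switches SMT at runtime; once the control state is
 * "forceoff" or "notsupported", further writes are rejected (see
 * store_smt_control() below).
 */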
2010
2011static ssize_t
2012show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2013{
2014 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
2015}
2016
2017static void cpuhp_offline_cpu_device(unsigned int cpu)
2018{
2019 struct device *dev = get_cpu_device(cpu);
2020
2021 dev->offline = true;
2022 /* Tell user space about the state change */
2023 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2024}
2025
Thomas Gleixner215af542018-07-07 11:40:18 +02002026static void cpuhp_online_cpu_device(unsigned int cpu)
2027{
2028 struct device *dev = get_cpu_device(cpu);
2029
2030 dev->offline = false;
2031 /* Tell user space about the state change */
2032 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2033}
2034
Thomas Gleixner05736e42018-05-29 17:48:27 +02002035static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2036{
2037 int cpu, ret = 0;
2038
2039 cpu_maps_update_begin();
2040 for_each_online_cpu(cpu) {
2041 if (topology_is_primary_thread(cpu))
2042 continue;
2043 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2044 if (ret)
2045 break;
2046 /*
2047 * As this needs to hold the cpu maps lock it's impossible
2048 * to call device_offline() because that ends up calling
 2049		 * cpu_down() which takes the cpu maps lock. The cpu maps lock
 2050		 * needs to be held as this might race against in-kernel
2051 * abusers of the hotplug machinery (thermal management).
2052 *
2053 * So nothing would update device:offline state. That would
2054 * leave the sysfs entry stale and prevent onlining after
2055 * smt control has been changed to 'off' again. This is
2056 * called under the sysfs hotplug lock, so it is properly
2057 * serialized against the regular offline usage.
2058 */
2059 cpuhp_offline_cpu_device(cpu);
2060 }
2061 if (!ret)
2062 cpu_smt_control = ctrlval;
2063 cpu_maps_update_done();
2064 return ret;
2065}
2066
Thomas Gleixner215af542018-07-07 11:40:18 +02002067static int cpuhp_smt_enable(void)
Thomas Gleixner05736e42018-05-29 17:48:27 +02002068{
Thomas Gleixner215af542018-07-07 11:40:18 +02002069 int cpu, ret = 0;
2070
Thomas Gleixner05736e42018-05-29 17:48:27 +02002071 cpu_maps_update_begin();
2072 cpu_smt_control = CPU_SMT_ENABLED;
Thomas Gleixner215af542018-07-07 11:40:18 +02002073 for_each_present_cpu(cpu) {
2074 /* Skip online CPUs and CPUs on offline nodes */
2075 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2076 continue;
2077 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2078 if (ret)
2079 break;
2080 /* See comment in cpuhp_smt_disable() */
2081 cpuhp_online_cpu_device(cpu);
2082 }
Thomas Gleixner05736e42018-05-29 17:48:27 +02002083 cpu_maps_update_done();
Thomas Gleixner215af542018-07-07 11:40:18 +02002084 return ret;
Thomas Gleixner05736e42018-05-29 17:48:27 +02002085}
2086
2087static ssize_t
2088store_smt_control(struct device *dev, struct device_attribute *attr,
2089 const char *buf, size_t count)
2090{
2091 int ctrlval, ret;
2092
2093 if (sysfs_streq(buf, "on"))
2094 ctrlval = CPU_SMT_ENABLED;
2095 else if (sysfs_streq(buf, "off"))
2096 ctrlval = CPU_SMT_DISABLED;
2097 else if (sysfs_streq(buf, "forceoff"))
2098 ctrlval = CPU_SMT_FORCE_DISABLED;
2099 else
2100 return -EINVAL;
2101
2102 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2103 return -EPERM;
2104
2105 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2106 return -ENODEV;
2107
2108 ret = lock_device_hotplug_sysfs();
2109 if (ret)
2110 return ret;
2111
2112 if (ctrlval != cpu_smt_control) {
2113 switch (ctrlval) {
2114 case CPU_SMT_ENABLED:
Thomas Gleixner215af542018-07-07 11:40:18 +02002115 ret = cpuhp_smt_enable();
Thomas Gleixner05736e42018-05-29 17:48:27 +02002116 break;
2117 case CPU_SMT_DISABLED:
2118 case CPU_SMT_FORCE_DISABLED:
2119 ret = cpuhp_smt_disable(ctrlval);
2120 break;
2121 }
2122 }
2123
2124 unlock_device_hotplug();
2125 return ret ? ret : count;
2126}
2127static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2128
2129static ssize_t
2130show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2131{
2132 bool active = topology_max_smt_threads() > 1;
2133
2134 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2135}
2136static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
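/*
 * "active" (/sys/devices/system/cpu/smt/active) is read-only and reports
 * whether SMT is currently in effect, i.e. more than one hardware thread per
 * core is online.
 */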
2137
2138static struct attribute *cpuhp_smt_attrs[] = {
2139 &dev_attr_control.attr,
2140 &dev_attr_active.attr,
2141 NULL
2142};
2143
2144static const struct attribute_group cpuhp_smt_attr_group = {
2145 .attrs = cpuhp_smt_attrs,
2146 .name = "smt",
2147 NULL
2148};
2149
2150static int __init cpu_smt_state_init(void)
2151{
Thomas Gleixner05736e42018-05-29 17:48:27 +02002152 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2153 &cpuhp_smt_attr_group);
2154}
2155
2156#else
2157static inline int cpu_smt_state_init(void) { return 0; }
2158#endif
2159
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002160static int __init cpuhp_sysfs_init(void)
2161{
2162 int cpu, ret;
2163
Thomas Gleixner05736e42018-05-29 17:48:27 +02002164 ret = cpu_smt_state_init();
2165 if (ret)
2166 return ret;
2167
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002168 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2169 &cpuhp_cpu_root_attr_group);
2170 if (ret)
2171 return ret;
2172
2173 for_each_possible_cpu(cpu) {
2174 struct device *dev = get_cpu_device(cpu);
2175
2176 if (!dev)
2177 continue;
2178 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2179 if (ret)
2180 return ret;
2181 }
2182 return 0;
2183}
2184device_initcall(cpuhp_sysfs_init);
2185#endif
2186
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002187/*
2188 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 2189 * represents all NR_CPUS-bit binary values of the form 1<<nr.
2190 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10302191 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002192 * mask value that has a single bit set only.
2193 */
Mike Travisb8d317d2008-07-24 18:21:29 -07002194
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002195/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07002196#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002197#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2198#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2199#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07002200
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002201const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07002202
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002203 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2204 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2205#if BITS_PER_LONG > 32
2206 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2207 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07002208#endif
2209};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002210EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
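For illustration, a sketch of how cpumask_of() resolves a CPU number against this table, mirroring the get_cpu_mask() helper in <linux/cpumask.h>: row 1 + cpu % BITS_PER_LONG carries the wanted bit in its first word, and stepping the pointer back by cpu / BITS_PER_LONG words places that bit in the correct word of the returned mask, with the preceding rows supplying the zero words.

static inline const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}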
Rusty Russell2d3854a2008-11-05 13:39:10 +11002211
2212const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2213EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10302214
2215#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002216struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002217 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10302218#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002219struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10302220#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002221EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302222
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002223struct cpumask __cpu_online_mask __read_mostly;
2224EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302225
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002226struct cpumask __cpu_present_mask __read_mostly;
2227EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302228
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002229struct cpumask __cpu_active_mask __read_mostly;
2230EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10302231
Rusty Russell3fa41522008-12-30 09:05:16 +10302232void init_cpu_present(const struct cpumask *src)
2233{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002234 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302235}
2236
2237void init_cpu_possible(const struct cpumask *src)
2238{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002239 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302240}
2241
2242void init_cpu_online(const struct cpumask *src)
2243{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002244 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302245}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002246
2247/*
2248 * Activate the first processor.
2249 */
2250void __init boot_cpu_init(void)
2251{
2252 int cpu = smp_processor_id();
2253
2254 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2255 set_cpu_online(cpu, true);
2256 set_cpu_active(cpu, true);
2257 set_cpu_present(cpu, true);
2258 set_cpu_possible(cpu, true);
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01002259
2260#ifdef CONFIG_SMP
2261 __boot_cpu_id = cpu;
2262#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002263}
2264
2265/*
2266 * Must be called _AFTER_ setting up the per_cpu areas
2267 */
2268void __init boot_cpu_state_init(void)
2269{
Thomas Gleixner0cc3cd22018-06-29 16:05:48 +02002270 this_cpu_write(cpuhp_state.booted_once, true);
2271 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002272}