// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

/* representing cpus currently known to be coherent */
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

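/*
 * Record @cpu as a sibling of every CPU already in cpu_sibling_setup_map
 * that shares its core (and vice versa). On single-threaded cores each
 * CPU is simply its own sibling.
 */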
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

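/*
 * Record @cpu in the core map of every CPU already in cpu_core_setup_map
 * that belongs to the same physical package (and vice versa).
 */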
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears. The mask is built by first picking one
 * representative VPE per online core, then removing each CPU's own
 * siblings from that set.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		pr_warn("Overriding previously set SMP ops\n");

	mp_ops = ops;
}

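/*
 * Generic IPI support, used when the platform routes IPIs through an IPI
 * irqdomain (e.g. the MIPS GIC or CPU interrupt controller) rather than
 * a platform-private mechanism.
 */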
#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

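/*
 * Send @action to every CPU in @mask. If the Cluster Power Controller is
 * present, any target core that is not currently coherent is commanded
 * to power up so that it can actually receive the IPI.
 */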
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler = ipi_resched_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI resched"
};

static struct irqaction irq_call = {
	.handler = ipi_call_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
			     struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

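/*
 * Reserve IPI virqs for the CPUs in @mask. For per-CPU IPI domains every
 * CPU gets its own virq offset from call_virq/sched_virq; otherwise a
 * single shared virq serves each IPI type.
 */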
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but didn't find an ipidomain, try to search for one that is
	 * not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

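/*
 * Release the IPI virqs previously reserved for the CPUs in @mask,
 * removing the per-CPU handlers first where the domain allocates one
 * virq per CPU.
 */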
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but didn't find an ipidomain, try to search for one that is
	 * not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

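/*
 * Bring up secondary CPU @cpu running @tidle as its idle thread. This is
 * a handshake with start_secondary(): we wait (with a 1s timeout) for the
 * new CPU to reach complete(&cpu_starting), synchronise the cycle
 * counters with it, then wait for it to mark itself online.
 */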
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

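/*
 * With MMIDs a globalized invalidate (ginvt) is broadcast to every
 * coherent CPU by the hardware, so the IPI-based fallback below is only
 * needed on pre-MMID systems.
 */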
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

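/* Argument bundle passed to the TLB flush IPI handlers below. */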
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

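/*
 * With MMIDs the range is invalidated directly with globalized ginvt
 * operations under the target mm's MMID; the loop below steps in
 * PAGE_SIZE * 2 strides because each MIPS TLB entry maps an even/odd
 * page pair.
 */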
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID so that has_valid_asid() sees the mm as
			 * completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

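/*
 * Flush the TLB entry for a single page. Mirrors flush_tlb_range(): a
 * globalized ginvt under the mm's MMID where available, an IPI when the
 * mm may be active elsewhere, and plain context invalidation otherwise.
 */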
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID so that has_valid_asid() sees
			 * the mm as completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

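/*
 * Tick broadcast is delivered via smp_call_function_single_async(). The
 * per-CPU count guards against queueing a call_single_data_t that is
 * still in flight; it is reset once the target CPU has received the
 * broadcast.
 */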
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */