// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>

#include "setup.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

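/* Logical id of the CPU we booted on; stays -1 until early boot fills it in. */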
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;


unsigned long klimit = (unsigned long) _end;

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_CORE
/* This keeps track of which cpu is the crashing one. */
int crashing_cpu = -1;
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * if fadump is active, cleanup the fadump registration before we
	 * shutdown.
	 */
	fadump_cleanup();

	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

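/*
 * Fallback when the platform cannot restart, power off or halt: report
 * that it is safe to cut power, then disable interrupts and spin forever.
 */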
static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}

void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	mdelay(1000);

	machine_hang();
}

void machine_power_off(void)
{
	machine_shutdown();
	if (pm_power_off)
		pm_power_off();

	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();

	smp_send_stop();
	machine_hang();
}

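/*
 * Per-CPU copy of the PVR, recorded at CPU bring-up so show_cpuinfo()
 * can report it without interrogating the other CPU.
 */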
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif

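/*
 * Print the system-wide tail of /proc/cpuinfo: total bogomips (SMP ppc32),
 * timebase frequency, platform name, model and, on ppc32, memory size.
 */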
static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
	unsigned long bogosum = 0;
	int i;

	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
	}
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

	/* Display the amount of memory */
	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
}

static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned long proc_freq;
	unsigned short maj;
	unsigned short min;

#ifdef CONFIG_SMP
	pvr = per_cpu(cpu_pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_printf(m, ", altivec supported");

	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cpu_has_feature(CPU_FTR_TAU)) {
		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
			/* more straightforward, but potentially misleading */
			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
				   cpu_temp(cpu_id));
		} else {
			/* show the actual temp sensor range */
			u32 temp;
			temp = cpu_temp_both(cpu_id);
			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
				   temp & 0xff, temp >> 16);
		}
	}
#endif /* CONFIG_TAU */

	/*
	 * Platforms that have variable clock rates should implement
	 * ppc_md.get_proc_freq(), which reports the clock rate of a
	 * given cpu. The rest can use ppc_proc_freq to report the
	 * clock rate that is the same across all cpus.
	 */
	if (ppc_md.get_proc_freq)
		proc_freq = ppc_md.get_proc_freq(cpu_id);
	else
		proc_freq = ppc_proc_freq;

	if (proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   proc_freq / 1000000, proc_freq % 1000000);

	if (ppc_md.show_percpuinfo != NULL)
		ppc_md.show_percpuinfo(m, cpu_id);

	/* If we are a Freescale core, do a simple check so
	 * we don't have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x0020:	/* 403 family */
			maj = PVR_MAJ(pvr) + 1;
			min = PVR_MIN(pvr);
			break;
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		case 0x004e: /* POWER9 bits 12-15 give chip type */
			maj = (pvr >> 8) & 0x0F;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
			   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, "\n");

	/* If this is the last cpu, print the summary */
	if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
		show_cpuinfo_summary(m);

	return 0;
}

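/*
 * seq_file iterator for /proc/cpuinfo: walk the online cpus, encoding
 * cpu_id + 1 as the iterator cookie so that NULL can mark the end.
 */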
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(unsigned long)(*pos + 1);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_per_subcore = tpc;
	cpumask_clear(&threads_core_mask);

	/* This implementation only supports a power-of-2 number of threads
	 * for simplicity and performance
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpumask_set_cpu(i, &threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}


u32 *cpu_to_phys_id = NULL;

/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *              cpu_possible_mask
 *              cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems. finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
					__alignof__(u32));
	if (!cpu_to_phys_id)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

	for_each_node_by_type(dn, "cpu") {
		const __be32 *intserv;
		__be32 cpu_be;
		int j, len;

		DBG(" * %pOF...\n", dn);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv) {
			DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
			    nthreads);
		} else {
			DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", &len);
			if (!intserv) {
				cpu_be = cpu_to_be32(cpu);
				/* XXX: what is this? uninitialized?? */
				intserv = &cpu_be;	/* assume logical == phys */
				len = 4;
			}
		}

		nthreads = len / sizeof(int);

		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
			bool avail;

			DBG(" thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, be32_to_cpu(intserv[j]));

			avail = of_device_is_available(dn);
			if (!avail)
				avail = !of_property_match_string(dn,
						"enable-method", "spin-table");

			set_cpu_present(cpu, avail);
			set_cpu_possible(cpu, true);
			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
			cpu++;
		}

		if (cpu >= nr_cpu_ids) {
			of_node_put(dn);
			break;
		}
	}

	/* If no SMT supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG(" SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const __be32 *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > nr_cpu_ids) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %u.\n",
			       maxcpus, nr_cpu_ids);
			maxcpus = nr_cpu_ids;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

| 556 | /* Initialize CPU <=> thread mapping/ |
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Now that possible cpus are set, set nr_cpu_ids for later use */
	setup_nr_cpu_ids();

	free_unused_pacas();
}
#endif /* CONFIG_SMP */

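/*
 * Register a "pcspkr" platform device when the device tree advertises a
 * PC-speaker node (pnpPNP,100), so the generic driver can bind to it.
 */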
#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif /* CONFIG_PCSPKR_PLATFORM */

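/*
 * Walk the machine descriptions linked between __machine_desc_start and
 * __machine_desc_end and install the first one whose probe() accepts this
 * machine as ppc_md.
 */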
void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;
	unsigned int i;

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type ...\n");

	/*
	 * Check that ppc_md is empty; if not we have a bug, i.e. we set up
	 * an entry before probe_machine() and it will be overwritten.
	 */
	for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
		if (((void **)&ppc_md)[i]) {
			printk(KERN_ERR "Entry %d in ppc_md non empty before"
			       " machine probe !\n", i);
		}
	}

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG(" %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match !\n");
			break;
		}
		DBG("\n");
	}
	/* What can we do if we didn't find ? */
	if (machine_id >= &__machine_desc_end) {
		pr_err("No suitable machine description found !\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch(base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

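/*
 * Panic notifier: hard-disable interrupts, hand over to firmware-assisted
 * dump if it is registered, then give the platform's panic hook a chance
 * to run.
 */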
static int ppc_panic_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	/*
	 * panic does a local_irq_disable, but we really
	 * want interrupts to be hard disabled.
	 */
	hard_irq_disable();

	/*
	 * If firmware-assisted dump has been registered then trigger
	 * firmware-assisted dump and let firmware handle everything else.
	 */
	crash_fadump(NULL, ptr);
	if (ppc_md.panic)
		ppc_md.panic(ptr); /* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

void __init setup_panic(void)
{
	/* PPC64 always does a hard irq disable in its panic handler */
	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
		return;
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency. This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree. Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY (!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	bool devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? false : true;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree_coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;
EXPORT_SYMBOL(powerpc_debugfs_root);

static int powerpc_debugfs_init(void)
{
	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

	return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif

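/* Simple boot-progress reporter: log the message, ignoring the hex checkpoint code. */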
void ppc_printk_progress(char *s, unsigned short hex)
{
	pr_info("%s\n", s);
}

void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
}

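/*
 * Dump the memory size, cache block sizes and CPU/MMU/firmware feature
 * masks gathered during early boot, bracketed by separator lines.
 */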
static __init void print_system_info(void)
{
	pr_info("-----------------------------------------------------\n");
	pr_info("phys_mem_size     = 0x%llx\n",
		(unsigned long long)memblock_phys_mem_size());

	pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
	pr_info("icache_bsize      = 0x%x\n", icache_bsize);
	if (ucache_bsize != 0)
		pr_info("ucache_bsize      = 0x%x\n", ucache_bsize);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n",
		(unsigned long)CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n",
		(unsigned long)CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
		cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#endif

	print_system_hash_info();

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);
	pr_info("-----------------------------------------------------\n");
}

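/*
 * Allocate a paca for every possible secondary CPU, record its hardware id,
 * then free the temporary cpu_to_phys_id[] table built from the device tree.
 */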
#ifdef CONFIG_SMP
static void smp_setup_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		allocate_paca(cpu);
		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
	}

	memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
	cpu_to_phys_id = NULL;
}
#endif

/*
 * Called from start_kernel, this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	kasan_init();

	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();

	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only; 64-bit machines do
	 * it from their respective probe() function).
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();

	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/* Parse memory topology */
	mem_topology_setup();

	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
#ifdef CONFIG_SMP
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();

	smp_release_cpus();
#endif

	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	/* Reserve large chunks of memory for use by CMA for KVM. */
	kvm_cma_reserve();

	klp_init_thread_info(&init_task);

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	mm_iommu_init(&init_mm);
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	initmem_init();

	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

	if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
		conswitchp = &dummy_con;

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();
	setup_spectre_v2();

	paging_init();

	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}