/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

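/*
 * Endianness probe: casting endian_test.l to char yields the low-order byte,
 * which is 'l' on a little-endian kernel and 'b' on a big-endian one. The
 * result is appended to the utsname machine and ELF platform strings below.
 */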
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

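/*
 * Decide whether the instruction cache can alias: on ARMv7 this checks
 * whether one I-cache way (line size * number of sets) exceeds PAGE_SIZE,
 * on ARMv6 it tests the relevant bit in the cache type register.
 */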
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

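/*
 * Decode the cache type register to classify the data and instruction
 * caches (VIVT, VIPT aliasing/non-aliasing, PIPT, ASID-tagged) and record
 * the result in the global cacheid used by the cache_is_*()/icache_is_*()
 * helpers.
 */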
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

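/*
 * When the CPU implements the hardware integer divide instructions, the
 * library __aeabi_uidiv/__aeabi_idiv helpers are patched in place at boot
 * with a single udiv/sdiv followed by a return, bypassing the software
 * division routine.
 */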
#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

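/*
 * Derive additional hwcap bits from the ARMv7+ CPUID feature registers:
 * integer divide from ID_ISAR0, LPAE from ID_MMFR0 and the v8
 * Crypto/CRC32 extensions from ID_ISAR5.
 */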
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

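/*
 * Seed cpu_logical_map() with the boot CPU's MPIDR affinity level 0 value so
 * that logical CPU 0 always refers to the CPU we booted on, before the full
 * SMP topology is known.
 */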
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

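/*
 * Identify the boot CPU from its main ID register, hook up the per-CPU-type
 * processor/TLB/user/cache method tables and compute the initial hwcaps.
 */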
static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

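/*
 * Register a bank of RAM with memblock, page-aligning the start and size and
 * clipping the bank to the 32-bit physical address space (when phys_addr_t
 * is 32 bits wide) and to PHYS_OFFSET.
 */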
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

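/*
 * For example, "mem=64M@0xc0000000" (an illustrative address) describes a
 * single 64MB bank starting at physical address 0xc0000000.
 */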
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

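/*
 * setup_arch() is the architecture entry point called from start_kernel():
 * it identifies the CPU and machine, parses the device tree or ATAGs, sets
 * up the memory layout and registers the machine's standard resources.
 */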
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};