// SPDX-License-Identifier: GPL-2.0-only
/*
 * Code to handle transition of Linux booting another kernel.
 *
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * Copyright (C) 2005 IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/ftrace.h>

#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/sections.h>

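/*
 * Quiesce interrupts before handing over to a new kernel: EOI anything
 * still in progress, then mask and disable every interrupt that has a
 * chip behind it.
 */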
void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

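/* Crash path: defer to the common powerpc crash shutdown code. */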
void machine_crash_shutdown(struct pt_regs *regs)
{
	default_machine_crash_shutdown(regs);
}

/*
 * Do whatever setup is needed on the image and the reboot code buffer
 * to allow us to avoid allocations later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	if (ppc_md.machine_kexec_prepare)
		return ppc_md.machine_kexec_prepare(image);
	else
		return default_machine_kexec_prepare(image);
}

void machine_kexec_cleanup(struct kimage *image)
{
}

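/*
 * Record powerpc-specific symbols, sizes and offsets in the vmcoreinfo
 * note so userspace dump tools can interpret a crash dump of this kernel.
 */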
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#else
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
	VMCOREINFO_SYMBOL(vmemmap_list);
	VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
	VMCOREINFO_SYMBOL(mmu_psize_defs);
	VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
	VMCOREINFO_OFFSET(vmemmap_backing, list);
	VMCOREINFO_OFFSET(vmemmap_backing, phys);
	VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
	VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
	VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	int save_ftrace_enabled;

	save_ftrace_enabled = __ftrace_enabled_save();
	this_cpu_disable_ftrace();

	if (ppc_md.machine_kexec)
		ppc_md.machine_kexec(image);
	else
		default_machine_kexec(image);

	this_cpu_enable_ftrace();
	__ftrace_enabled_restore(save_ftrace_enabled);

	/* Fall back to normal restart if we're still alive. */
	machine_restart(NULL);
	for (;;);
}

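/*
 * Carve out the crashkernel region: take the size and base from the
 * crashkernel= command line option (or values already in the device tree),
 * sanitise them, and reserve the result in memblock.
 */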
void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	int ret;

	/* use common parsing */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
	}

	if (crashk_res.end == crashk_res.start) {
		crashk_res.start = crashk_res.end = 0;
		return;
	}

	/*
	 * We might have got these values via the command line or the
	 * device tree; either way, sanitise them now.
	 */
	crash_size = resource_size(&crashk_res);

#ifndef CONFIG_NONSTATIC_KERNEL
	if (crashk_res.start != KDUMP_KERNELBASE)
		printk("Crash kernel location must be 0x%x\n",
		       KDUMP_KERNELBASE);

	crashk_res.start = KDUMP_KERNELBASE;
#else
	if (!crashk_res.start) {
#ifdef CONFIG_PPC64
		/*
		 * On 64bit we split the RMO in half but cap it at half of
		 * a small SLB (128MB) since the crash kernel needs to place
		 * itself and some stacks to be in the first segment.
		 */
		crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
#else
		crashk_res.start = KDUMP_KERNELBASE;
#endif
	}

	crash_base = PAGE_ALIGN(crashk_res.start);
	if (crash_base != crashk_res.start) {
		printk("Crash kernel base must be aligned to 0x%lx\n",
		       PAGE_SIZE);
		crashk_res.start = crash_base;
	}

#endif
	crash_size = PAGE_ALIGN(crash_size);
	crashk_res.end = crashk_res.start + crash_size - 1;

	/* The crash region must not overlap the current kernel */
	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
		printk(KERN_WARNING
			"Crash kernel can not overlap current kernel\n");
		crashk_res.start = crashk_res.end = 0;
		return;
	}

	/* Crash kernel trumps memory limit */
	if (memory_limit && memory_limit <= crashk_res.end) {
		memory_limit = crashk_res.end + 1;
		printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
		       memory_limit);
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crashk_res.start >> 20),
			(unsigned long)(memblock_phys_mem_size() >> 20));

	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
	    memblock_reserve(crashk_res.start, crash_size)) {
		pr_err("Failed to reserve memory for crashkernel!\n");
		crashk_res.start = crashk_res.end = 0;
		return;
	}
}

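/* Return true if [start, start + size) intersects the crashkernel region. */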
int overlaps_crashkernel(unsigned long start, unsigned long size)
{
	return (start + size) > crashk_res.start && start <= crashk_res.end;
}

/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
static unsigned long long mem_limit;

static struct property kernel_end_prop = {
	.name = "linux,kernel-end",
	.length = sizeof(phys_addr_t),
	.value = &kernel_end,
};

static struct property crashk_base_prop = {
	.name = "linux,crashkernel-base",
	.length = sizeof(phys_addr_t),
	.value = &crashk_base,
};

static struct property crashk_size_prop = {
	.name = "linux,crashkernel-size",
	.length = sizeof(phys_addr_t),
	.value = &crashk_size,
};

static struct property memory_limit_prop = {
	.name = "linux,memory-limit",
	.length = sizeof(unsigned long long),
	.value = &mem_limit,
};

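/* cpu_to_be32 or cpu_to_be64, matching the width of an unsigned long. */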
#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)

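/*
 * Advertise the crashkernel reservation and the current memory limit to
 * userspace kexec-tools as properties of the node passed in (/chosen).
 */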
static void __init export_crashk_values(struct device_node *node)
{
	/*
	 * There might be existing crash kernel properties, but we can't
	 * be sure what's in them, so remove them.
	 */
	of_remove_property(node, of_find_property(node,
				"linux,crashkernel-base", NULL));
	of_remove_property(node, of_find_property(node,
				"linux,crashkernel-size", NULL));

	if (crashk_res.start != 0) {
		crashk_base = cpu_to_be_ulong(crashk_res.start);
		of_add_property(node, &crashk_base_prop);
		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
		of_add_property(node, &crashk_size_prop);
	}

	/*
	 * memory_limit is required by the kexec-tools to limit the
	 * crash regions to the actual memory used.
	 */
	mem_limit = cpu_to_be_ulong(memory_limit);
	of_update_property(node, &memory_limit_prop);
}

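/*
 * Export the values that kexec-tools and a kexec'd kernel need from the
 * running kernel (kernel end, crashkernel reservation, memory limit) as
 * properties of the /chosen device tree node.
 */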
static int __init kexec_setup(void)
{
	struct device_node *node;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENOENT;

	/* remove any stale properties so ours can be found */
	of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));

	/* information needed by userspace when using default_machine_kexec */
	kernel_end = cpu_to_be_ulong(__pa(_end));
	of_add_property(node, &kernel_end_prop);

	export_crashk_values(node);

	of_node_put(node);
	return 0;
}
late_initcall(kexec_setup);