// SPDX-License-Identifier: GPL-2.0-only
/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
#include <asm/set_memory.h>

#ifdef CONFIG_KEXEC_FILE
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_bzImage64_ops,
	NULL
};
#endif

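/*
 * Free the page-table pages that were allocated for the transition
 * mapping (see init_transition_pgtable() below) and clear the stored
 * pointers so a subsequent call does not free them again.
 */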
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.p4d);
	image->arch.p4d = NULL;
	free_page((unsigned long)image->arch.pud);
	image->arch.pud = NULL;
	free_page((unsigned long)image->arch.pmd);
	image->arch.pmd = NULL;
	free_page((unsigned long)image->arch.pte);
	image->arch.pte = NULL;
}

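/*
 * Map the virtual address of relocate_kernel() to the physical address
 * of the control code page in the page table rooted at @pgd, allocating
 * any missing intermediate levels.  The allocated pages are remembered
 * in image->arch so free_transition_pgtable() can release them.
 */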
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			goto err;
		image->arch.p4d = p4d;
		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
	return 0;
err:
	return result;
}

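/*
 * Page-table allocation callback for kernel_ident_mapping_init():
 * hand out zeroed control pages owned by the kexec image, so they stay
 * valid across the transition and are freed together with the image.
 */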
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}

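/*
 * Build the identity-mapped page table used while the new kernel is
 * copied into place: map every RAM region the current kernel has
 * mapped, plus the image segments themselves, and finally add the
 * transition mapping for relocate_kernel().
 */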
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernpg_flag	= _KERNPG_TABLE_NOENC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);
	clear_page(level4p);

	if (direct_gbpages)
		info.direct_gbpages = true;

	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * The segments' memory ranges could lie outside 0 ~ max_pfn,
	 * for example when jumping back to the original kernel from the
	 * kexeced kernel, or when the first kernel is booted with a
	 * user-supplied memory map and the second kernel is loaded
	 * outside of that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);

		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}

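/*
 * set_idt()/set_gdt() load the IDTR/GDTR with an arbitrary base and
 * limit; machine_kexec() uses them below to point the CPU at a
 * zero-limit IDT and GDT just before jumping to the relocated code.
 */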
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
};


static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
};

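/*
 * Reload the data segment registers from the current GDT.  Their hidden
 * descriptor caches then remain valid even after the GDT itself is
 * invalidated below (see the comment in machine_kexec()).
 */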
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

#ifdef CONFIG_KEXEC_FILE
/* Update purgatory as needed after various image segments have been prepared */
static int arch_update_purgatory(struct kimage *image)
{
	int ret = 0;

	if (!image->file_mode)
		return 0;

	/* Setup copying of backup region */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_dest",
				&image->arch.backup_load_addr,
				sizeof(image->arch.backup_load_addr), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_src",
				&image->arch.backup_src_start,
				sizeof(image->arch.backup_src_start), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_sz",
				&image->arch.backup_src_sz,
				sizeof(image->arch.backup_src_sz), 0);
		if (ret)
			return ret;
	}

	return ret;
}
#else /* !CONFIG_KEXEC_FILE */
static inline int arch_update_purgatory(struct kimage *image)
{
	return 0;
}
#endif /* CONFIG_KEXEC_FILE */

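/*
 * Prepare the loaded image for execution: build the identity-mapped
 * page tables in the control pages and, for file-mode crash images,
 * patch the purgatory backup-region symbols.
 */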
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* update purgatory as needed */
	result = arch_update_purgatory(image);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel.  The
		 * kexec/kdump paths already call restore_boot_irq_mode()
		 * in one form or another; the kexec jump path needs one
		 * as well.
		 */
		clear_IO_APIC();
		restore_boot_irq_mode();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
		(unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context,
				       sme_active());

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

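/*
 * Export the x86_64-specific values (phys_base, init_top_pgt, 5-level
 * paging state, KASLR offset, SME mask and, where configured, NUMA node
 * data) that dump tools need in order to interpret a crash dump.
 */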
void arch_crash_save_vmcoreinfo(void)
{
	u64 sme_mask = sme_me_mask;

	VMCOREINFO_NUMBER(phys_base);
	VMCOREINFO_SYMBOL(init_top_pgt);
	vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n",
			      pgtable_l5_enabled());

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
			      kaslr_offset());
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
	VMCOREINFO_NUMBER(sme_mask);
}

/* arch-dependent functionality related to kexec file-based syscall */

#ifdef CONFIG_KEXEC_FILE
void *arch_kexec_kernel_image_load(struct kimage *image)
{
	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;

	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

/*
 * Apply purgatory relocations.
 *
 * @pi:		Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
 * @relsec:	Section containing RELAs.
 * @symtabsec:	Corresponding symtab.
 *
 * TODO: Some of the code belongs to generic code. Move that in kexec.c.
 */
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section, const Elf_Shdr *relsec,
				     const Elf_Shdr *symtabsec)
{
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;
	const Elf_Shdr *sechdrs;

	/* String & section header string table */
	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

	rel = (void *)pi->ehdr + relsec->sh_offset;

	pr_debug("Applying relocate section %s to %u\n",
		 shstrtab + relsec->sh_name, relsec->sh_info);

	for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains the byte offset from the
		 * beginning of the section to the storage unit affected.
		 *
		 * This is the location to update, inside the temporary
		 * buffer where the section is currently loaded.  The
		 * section will later be loaded to a different address,
		 * pointed to by ->sh_addr; kexec takes care of moving it
		 * (kexec_load_segment()).
		 */
		location = pi->purgatory_buf;
		location += section->sh_offset;
		location += rel[i].r_offset;

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;

		/*
		 * rel[i].r_info contains the symbol table index against
		 * which the relocation must be made and the type of
		 * relocation to apply.  The ELF64_R_SYM() and
		 * ELF64_R_TYPE() macros extract these, respectively.
		 */
		sym = (void *)pi->ehdr + symtabsec->sh_offset;
		sym += ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
			 name, sym->st_info, sym->st_shndx, sym->st_value,
			 sym->st_size);

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
		else if (sym->st_shndx >= pi->ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
			sec_base = pi->sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
		value += rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_32:
			*(u32 *)location = value;
			if (value != *(u32 *)location)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)location = value;
			if ((s64)value != *(s32 *)location)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			value -= (u64)address;
			*(u32 *)location = value;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("Overflow in relocation type %d value 0x%lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), value);
	return -ENOEXEC;
}
#endif /* CONFIG_KEXEC_FILE */

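/*
 * Change the protection of a physical range inside the crash kernel
 * region to read-only (protect == true) or read-write.
 */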
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
	struct page *page;
	unsigned int nr_pages;

	/*
	 * For physical range: [start, end]. We must skip the unassigned
	 * crashk resource with zero-valued "end" member.
	 */
	if (!end || start > end)
		return 0;

	page = pfn_to_page(start >> PAGE_SHIFT);
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	if (protect)
		return set_pages_ro(page, nr_pages);
	else
		return set_pages_rw(page, nr_pages);
}

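/*
 * Toggle the protection of the crash kernel memory.  The control code
 * page is skipped because crash_kexec() has to write to it at crash
 * time.
 */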
static void kexec_mark_crashkres(bool protect)
{
	unsigned long control;

	kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);

	/* Don't touch the control code page used in crash_kexec(). */
	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
	/* Control code page is located in the 2nd page. */
	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
	control += KEXEC_CONTROL_PAGE_SIZE;
	kexec_mark_range(control, crashk_res.end, protect);
}

void arch_kexec_protect_crashkres(void)
{
	kexec_mark_crashkres(true);
}

void arch_kexec_unprotect_crashkres(void)
{
	kexec_mark_crashkres(false);
}

int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
	/*
	 * If SME is active we need to be sure that kexec pages are
	 * not encrypted because when we boot to the new kernel the
	 * pages won't be accessed encrypted (initially).
	 */
	return set_memory_decrypted((unsigned long)vaddr, pages);
}

void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
{
	/*
	 * If SME is active we need to reset the pages back to being
	 * an encrypted mapping before freeing them.
	 */
	set_memory_encrypted((unsigned long)vaddr, pages);
}