/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/*
 * Currently this is the only default function that is exported, as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}
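
/*
 * An architecture opts in to the probe loop above by providing the
 * NULL-terminated kexec_file_loaders array in its arch code. As an
 * illustrative sketch, loosely modeled on x86_64:
 *
 *	const struct kexec_file_ops * const kexec_file_loaders[] = {
 *		&kexec_bzImage64_ops,
 *		NULL
 *	};
 */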

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}

static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}

int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
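
/*
 * The hooks dispatched above come from a struct kexec_file_ops instance
 * registered by an image loader. A minimal sketch, loosely modeled on the
 * x86 bzImage loader (the hook names are illustrative here):
 *
 *	const struct kexec_file_ops kexec_bzImage64_ops = {
 *		.probe = bzImage64_probe,
 *		.load = bzImage64_load,
 *		.cleanup = bzImage64_cleanup,
 *	#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
 *		.verify_sig = bzImage64_verify_sig,
 *	#endif
 *	};
 */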

/*
 * arch_kexec_apply_relocations_add - apply relocations of type RELA
 * @pi:		Purgatory to be relocated.
 * @section:	Section the relocations are being applied to.
 * @relsec:	Section containing RELAs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * arch_kexec_apply_relocations - apply relocations of type REL
 * @pi:		Purgatory to be relocated.
 * @section:	Section the relocations are being applied to.
 * @relsec:	Section containing RELs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary allocations that are no longer needed once the buffers have
 * been loaded into separate segments and copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if the architecture has anything to clean up post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The call above should have reached the boot loader's cleanup hook
	 * to free any data stored in image->image_loader_data. It should be
	 * safe to free it now.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* The command line should be a NUL-terminated string */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. This is the same memory where the old crash kernel might
	 * be loaded. Free any current crash dump kernel before we corrupt
	 * it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy the vmcoreinfo data after
	 * that call.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
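
/*
 * Illustrative sketch of how userspace drives this system call (there is
 * no glibc wrapper, so syscall(2) is used directly; the paths and command
 * line below are made up for the example):
 *
 *	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *	const char *cmdline = "root=/dev/sda1 console=ttyS0";
 *
 *	// cmdline_len must count the trailing NUL, per the check above
 *	syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *		strlen(cmdline) + 1, cmdline, 0);
 */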

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any existing
		 * segments
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take us to the next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range, otherwise use a
	 * bottom up allocation.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}

/**
 * arch_kexec_walk_mem - call func(data) on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	/* Arch knows where to place */
	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
		return 0;

	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently, adding a segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed before any control
	 * pages are allocated, as the control page allocation logic goes
	 * through the list of segments to make sure there are no
	 * destination overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
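
/*
 * Sketch of a typical in-kernel caller, e.g. an arch loader placing an
 * initrd segment (the variable names are illustrative):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *				  .buf_max = ULONG_MAX, .top_down = false };
 *
 *	kbuf.buffer = initrd;
 *	kbuf.bufsz = kbuf.memsz = initrd_len;
 *	kbuf.buf_align = PAGE_SIZE;
 *	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 *	ret = kexec_add_buffer(&kbuf);
 *	if (!ret)
 *		initrd_load_addr = kbuf.mem;
 */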

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip the purgatory segment, as it will be modified once
		 * we put the digest info in the purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume the rest of the buffer is filled with zeros and
		 * update the digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}

#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/*
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer to setup.
 *
 * Allocates the memory needed for the buffer. The caller is responsible
 * for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
				      struct kexec_buf *kbuf)
{
	const Elf_Shdr *sechdrs;
	unsigned long bss_align;
	unsigned long bss_sz;
	unsigned long align;
	int i, ret;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	kbuf->buf_align = bss_align = 1;
	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf->buf_align < align)
				kbuf->buf_align = align;
			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
	kbuf->memsz = kbuf->bufsz + bss_sz;
	if (kbuf->buf_align < bss_align)
		kbuf->buf_align = bss_align;

	kbuf->buffer = vzalloc(kbuf->bufsz);
	if (!kbuf->buffer)
		return -ENOMEM;
	pi->purgatory_buf = kbuf->buffer;

	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}

/*
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. The caller is responsible
 * for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
					 struct kexec_buf *kbuf)
{
	unsigned long bss_addr;
	unsigned long offset;
	Elf_Shdr *sechdrs;
	int i;

	/*
	 * The section headers in kexec_purgatory are read-only. To make
	 * them modifiable, work on a temporary copy.
	 */
	sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
	if (!sechdrs)
		return -ENOMEM;
	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	pi->sechdrs = sechdrs;

	offset = 0;
	bss_addr = kbuf->mem + kbuf->bufsz;
	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		unsigned long align;
		void *src, *dst;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
			continue;
		}

		offset = ALIGN(offset, align);
		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
					 + sechdrs[i].sh_size)) {
			kbuf->image->start -= sechdrs[i].sh_addr;
			kbuf->image->start += kbuf->mem + offset;
		}

		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
		dst = pi->purgatory_buf + offset;
		memcpy(dst, src, sechdrs[i].sh_size);

		sechdrs[i].sh_addr = kbuf->mem + offset;
		sechdrs[i].sh_offset = offset;
		offset += sechdrs[i].sh_size;
	}

	return 0;
}

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Shdr *sechdrs;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		const Elf_Shdr *relsec;
		const Elf_Shdr *symtab;
		Elf_Shdr *section;

		relsec = sechdrs + i;

		if (relsec->sh_type != SHT_RELA &&
		    relsec->sh_type != SHT_REL)
			continue;

		/*
		 * For a section of type SHT_RELA/SHT_REL, ->sh_link contains
		 * the section header index of the associated symbol table,
		 * and ->sh_info contains the section header index of the
		 * section to which the relocations apply.
		 */
		if (relsec->sh_info >= pi->ehdr->e_shnum ||
		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = pi->sechdrs + relsec->sh_info;
		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (relsec->sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi, section,
							       relsec, symtab);
		else if (relsec->sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi, section,
							   relsec, symtab);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. The caller is
 * responsible for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;
out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
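
/*
 * Sketch of the expected calling sequence, loosely modeled on the x86
 * bzImage loader (MIN_PURGATORY_ADDR and the "entry64_regs" symbol come
 * from that arch and are illustrative here):
 *
 *	struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
 *				  .buf_max = ULONG_MAX, .top_down = true,
 *				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 *
 *	ret = kexec_load_purgatory(image, &pbuf);
 *	if (!ret)
 *		ret = kexec_purgatory_get_set_symbol(image, "entry64_regs",
 *						     &regs64, sizeof(regs64), 0);
 */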

/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->ehdr)
		return NULL;

	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kexec_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol
 * value is returned in buf; otherwise the symbol value is set based on
 * the value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sec = pi->sechdrs + sym->st_shndx;

	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */

int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of the range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found a completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift the rest of the ranges to the left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split the original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to the array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where the new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
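
/*
 * Worked example with made-up numbers: if mem->ranges[] holds the single
 * range [0x1000, 0x8fff], then crash_exclude_mem_range(mem, 0x3000, 0x4fff)
 * splits it into [0x1000, 0x2fff] and [0x5000, 0x8fff] and bumps
 * mem->nr_ranges from 1 to 2; it returns -ENOMEM instead if the array has
 * no free slot for the second half of the split.
 */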

int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for the vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
	 * x86_64), presumably required by tools like gdb. So the same
	 * physical memory is mapped by two ELF headers: one containing the
	 * kernel text virtual addresses and the other the __va(physical)
	 * addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present CPU */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare a PT_LOAD type program header for the kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
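
/*
 * Sketch of a typical caller in arch crash code (error handling elided;
 * cmem is a previously built, exclusion-filtered crash_mem list). The
 * resulting buffer is then handed to kexec_add_buffer() as the
 * elfcorehdr segment:
 *
 *	void *addr;
 *	unsigned long sz;
 *
 *	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64),
 *					  &addr, &sz);
 */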