// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * elf.c - ELF access library
 *
 * Adapted from kpatch (https://github.com/dynup/kpatch):
 * Copyright (C) 2013-2015 Josh Poimboeuf <jpoimboe@redhat.com>
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include "builtin.h"

#include "elf.h"
#include "warn.h"

#define MAX_NAME_LEN 128

static inline u32 str_hash(const char *str)
{
	return jhash(str, strlen(str), 0);
}

static inline int elf_hash_bits(void)
{
	return vmlinux ? ELF_HASH_BITS : 16;
}

#define elf_hash_add(hashtable, node, key) \
	hlist_add_head(node, &hashtable[hash_min(key, elf_hash_bits())])

static void elf_hash_init(struct hlist_head *table)
{
	__hash_init(table, 1U << elf_hash_bits());
}

#define elf_hash_for_each_possible(name, obj, member, key) \
	hlist_for_each_entry(obj, &name[hash_min(key, elf_hash_bits())], member)

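/*
 * Minimal rb-tree helpers for the per-section symbol trees: rb_add() inserts
 * a node using a comparison callback, rb_find_first()/rb_next_match() locate
 * all nodes matching a key, and rb_for_each() iterates over them.  Typical
 * use (mirroring find_symbol_by_offset() below):
 *
 *	rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
 *		struct symbol *s = rb_entry(node, struct symbol, node);
 *		...
 *	}
 */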
static void rb_add(struct rb_root *tree, struct rb_node *node,
		   int (*cmp)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		parent = *link;
		if (cmp(node, parent) < 0)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, tree);
}

static struct rb_node *rb_find_first(const struct rb_root *tree, const void *key,
				     int (*cmp)(const void *key, const struct rb_node *))
{
	struct rb_node *node = tree->rb_node;
	struct rb_node *match = NULL;

	while (node) {
		int c = cmp(key, node);
		if (c <= 0) {
			if (!c)
				match = node;
			node = node->rb_left;
		} else if (c > 0) {
			node = node->rb_right;
		}
	}

	return match;
}

static struct rb_node *rb_next_match(struct rb_node *node, const void *key,
				     int (*cmp)(const void *key, const struct rb_node *))
{
	node = rb_next(node);
	if (node && cmp(key, node))
		node = NULL;
	return node;
}

#define rb_for_each(tree, node, key, cmp) \
	for ((node) = rb_find_first((tree), (key), (cmp)); \
	     (node); (node) = rb_next_match((node), (key), (cmp)))

static int symbol_to_offset(struct rb_node *a, const struct rb_node *b)
{
	struct symbol *sa = rb_entry(a, struct symbol, node);
	struct symbol *sb = rb_entry(b, struct symbol, node);

	if (sa->offset < sb->offset)
		return -1;
	if (sa->offset > sb->offset)
		return 1;

	if (sa->len < sb->len)
		return -1;
	if (sa->len > sb->len)
		return 1;

	sa->alias = sb;

	return 0;
}

static int symbol_by_offset(const void *key, const struct rb_node *node)
{
	const struct symbol *s = rb_entry(node, struct symbol, node);
	const unsigned long *o = key;

	if (*o < s->offset)
		return -1;
	if (*o >= s->offset + s->len)
		return 1;

	return 0;
}

struct section *find_section_by_name(const struct elf *elf, const char *name)
{
	struct section *sec;

	elf_hash_for_each_possible(elf->section_name_hash, sec, name_hash, str_hash(name))
		if (!strcmp(sec->name, name))
			return sec;

	return NULL;
}

static struct section *find_section_by_index(struct elf *elf,
					     unsigned int idx)
{
	struct section *sec;

	elf_hash_for_each_possible(elf->section_hash, sec, hash, idx)
		if (sec->idx == idx)
			return sec;

	return NULL;
}

static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
	struct symbol *sym;

	elf_hash_for_each_possible(elf->symbol_hash, sym, hash, idx)
		if (sym->idx == idx)
			return sym;

	return NULL;
}

struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
{
	struct rb_node *node;

	rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
		struct symbol *s = rb_entry(node, struct symbol, node);

		if (s->offset == offset && s->type != STT_SECTION)
			return s;
	}

	return NULL;
}

struct symbol *find_func_by_offset(struct section *sec, unsigned long offset)
{
	struct rb_node *node;

	rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
		struct symbol *s = rb_entry(node, struct symbol, node);

		if (s->offset == offset && s->type == STT_FUNC)
			return s;
	}

	return NULL;
}

struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset)
{
	struct rb_node *node;

	rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
		struct symbol *s = rb_entry(node, struct symbol, node);

		if (s->type != STT_SECTION)
			return s;
	}

	return NULL;
}

struct symbol *find_func_containing(struct section *sec, unsigned long offset)
{
	struct rb_node *node;

	rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
		struct symbol *s = rb_entry(node, struct symbol, node);

		if (s->type == STT_FUNC)
			return s;
	}

	return NULL;
}

struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
	struct symbol *sym;

	elf_hash_for_each_possible(elf->symbol_name_hash, sym, name_hash, str_hash(name))
		if (!strcmp(sym->name, name))
			return sym;

	return NULL;
}

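/*
 * Relocation lookup: relocs are hashed by (section, offset) via
 * sec_offset_hash().  find_reloc_by_dest_range() probes the bucket for each
 * offset in [offset, offset + len) and returns a matching relocation from the
 * first bucket that has one (preferring the lowest reloc->offset within that
 * bucket), or NULL if the section has no reloc section or nothing matches.
 */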
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
				       unsigned long offset, unsigned int len)
{
	struct reloc *reloc, *r = NULL;
	unsigned long o;

	if (!sec->reloc)
		return NULL;

	sec = sec->reloc;

	for_offset_range(o, offset, offset + len) {
		elf_hash_for_each_possible(elf->reloc_hash, reloc, hash,
					   sec_offset_hash(sec, o)) {
			if (reloc->sec != sec)
				continue;

			if (reloc->offset >= offset && reloc->offset < offset + len) {
				if (!r || reloc->offset < r->offset)
					r = reloc;
			}
		}
		if (r)
			return r;
	}

	return NULL;
}

struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset)
{
	return find_reloc_by_dest_range(elf, sec, offset, 1);
}

static int read_sections(struct elf *elf)
{
	Elf_Scn *s = NULL;
	struct section *sec;
	size_t shstrndx, sections_nr;
	int i;

	if (elf_getshdrnum(elf->elf, &sections_nr)) {
		WARN_ELF("elf_getshdrnum");
		return -1;
	}

	if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
		WARN_ELF("elf_getshdrstrndx");
		return -1;
	}

	for (i = 0; i < sections_nr; i++) {
		sec = malloc(sizeof(*sec));
		if (!sec) {
			perror("malloc");
			return -1;
		}
		memset(sec, 0, sizeof(*sec));

		INIT_LIST_HEAD(&sec->symbol_list);
		INIT_LIST_HEAD(&sec->reloc_list);

		s = elf_getscn(elf->elf, i);
		if (!s) {
			WARN_ELF("elf_getscn");
			return -1;
		}

		sec->idx = elf_ndxscn(s);

		if (!gelf_getshdr(s, &sec->sh)) {
			WARN_ELF("gelf_getshdr");
			return -1;
		}

		sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
		if (!sec->name) {
			WARN_ELF("elf_strptr");
			return -1;
		}

		if (sec->sh.sh_size != 0) {
			sec->data = elf_getdata(s, NULL);
			if (!sec->data) {
				WARN_ELF("elf_getdata");
				return -1;
			}
			if (sec->data->d_off != 0 ||
			    sec->data->d_size != sec->sh.sh_size) {
				WARN("unexpected data attributes for %s",
				     sec->name);
				return -1;
			}
		}
		sec->len = sec->sh.sh_size;

		list_add_tail(&sec->list, &elf->sections);
		elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
		elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
	}

	if (stats)
		printf("nr_sections: %lu\n", (unsigned long)sections_nr);

	/* sanity check, one more call to elf_nextscn() should return NULL */
	if (elf_nextscn(elf->elf, s)) {
		WARN("section entry mismatch");
		return -1;
	}

	return 0;
}

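/*
 * Read .symtab (and .symtab_shndx, used for extended section indexes when
 * st_shndx is SHN_XINDEX), resolve each symbol's section, insert it into the
 * per-section rb-tree/list and the elf-wide index/name hashes, and finally
 * link any "*.cold" child functions to their parents via sym->pfunc/cfunc.
 */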
static int read_symbols(struct elf *elf)
{
	struct section *symtab, *symtab_shndx, *sec;
	struct symbol *sym, *pfunc;
	struct list_head *entry;
	struct rb_node *pnode;
	int symbols_nr, i;
	char *coldstr;
	Elf_Data *shndx_data = NULL;
	Elf32_Word shndx;

	symtab = find_section_by_name(elf, ".symtab");
	if (!symtab) {
		WARN("missing symbol table");
		return -1;
	}

	symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
	if (symtab_shndx)
		shndx_data = symtab_shndx->data;

	symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;

	for (i = 0; i < symbols_nr; i++) {
		sym = malloc(sizeof(*sym));
		if (!sym) {
			perror("malloc");
			return -1;
		}
		memset(sym, 0, sizeof(*sym));
		sym->alias = sym;

		sym->idx = i;

		if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
				      &shndx)) {
			WARN_ELF("gelf_getsymshndx");
			goto err;
		}

		sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
				       sym->sym.st_name);
		if (!sym->name) {
			WARN_ELF("elf_strptr");
			goto err;
		}

		sym->type = GELF_ST_TYPE(sym->sym.st_info);
		sym->bind = GELF_ST_BIND(sym->sym.st_info);

		if ((sym->sym.st_shndx > SHN_UNDEF &&
		     sym->sym.st_shndx < SHN_LORESERVE) ||
		    (shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
			if (sym->sym.st_shndx != SHN_XINDEX)
				shndx = sym->sym.st_shndx;

			sym->sec = find_section_by_index(elf, shndx);
			if (!sym->sec) {
				WARN("couldn't find section for symbol %s",
				     sym->name);
				goto err;
			}
			if (sym->type == STT_SECTION) {
				sym->name = sym->sec->name;
				sym->sec->sym = sym;
			}
		} else
			sym->sec = find_section_by_index(elf, 0);

		sym->offset = sym->sym.st_value;
		sym->len = sym->sym.st_size;

		rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset);
		pnode = rb_prev(&sym->node);
		if (pnode)
			entry = &rb_entry(pnode, struct symbol, node)->list;
		else
			entry = &sym->sec->symbol_list;
		list_add(&sym->list, entry);
		elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
		elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
	}

	if (stats)
		printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);

	/* Create parent/child links for any cold subfunctions */
	list_for_each_entry(sec, &elf->sections, list) {
		list_for_each_entry(sym, &sec->symbol_list, list) {
			char pname[MAX_NAME_LEN + 1];
			size_t pnamelen;
			if (sym->type != STT_FUNC)
				continue;

			if (sym->pfunc == NULL)
				sym->pfunc = sym;

			if (sym->cfunc == NULL)
				sym->cfunc = sym;

			coldstr = strstr(sym->name, ".cold");
			if (!coldstr)
				continue;

			pnamelen = coldstr - sym->name;
			if (pnamelen > MAX_NAME_LEN) {
				WARN("%s(): parent function name exceeds maximum length of %d characters",
				     sym->name, MAX_NAME_LEN);
				return -1;
			}

			strncpy(pname, sym->name, pnamelen);
			pname[pnamelen] = '\0';
			pfunc = find_symbol_by_name(elf, pname);

			if (!pfunc) {
				WARN("%s(): can't find parent function",
				     sym->name);
				return -1;
			}

			sym->pfunc = pfunc;
			pfunc->cfunc = sym;

			/*
			 * Unfortunately, -fno-reorder-functions puts the child
			 * inside the parent.  Remove the overlap so we can
			 * have sane assumptions.
			 *
			 * Note that pfunc->len now no longer matches
			 * pfunc->sym.st_size.
			 */
			if (sym->sec == pfunc->sec &&
			    sym->offset >= pfunc->offset &&
			    sym->offset + sym->len == pfunc->offset + pfunc->len) {
				pfunc->len -= sym->len;
			}
		}
	}

	return 0;

err:
	free(sym);
	return -1;
}

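/*
 * Register an already-initialized reloc: append it to its section's
 * reloc_list and add it to the elf-wide reloc hash keyed by (section, offset).
 */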
void elf_add_reloc(struct elf *elf, struct reloc *reloc)
{
	struct section *sec = reloc->sec;

	list_add_tail(&reloc->list, &sec->reloc_list);
	elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
}

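/*
 * read_rel_reloc()/read_rela_reloc() normalize one SHT_REL or SHT_RELA entry
 * into the common struct reloc fields (type, addend, offset) and return the
 * symbol index; SHT_REL entries carry no addend, so it is set to 0.
 */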
static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
{
	if (!gelf_getrel(sec->data, i, &reloc->rel)) {
		WARN_ELF("gelf_getrel");
		return -1;
	}
	reloc->type = GELF_R_TYPE(reloc->rel.r_info);
	reloc->addend = 0;
	reloc->offset = reloc->rel.r_offset;
	*symndx = GELF_R_SYM(reloc->rel.r_info);
	return 0;
}

static int read_rela_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
{
	if (!gelf_getrela(sec->data, i, &reloc->rela)) {
		WARN_ELF("gelf_getrela");
		return -1;
	}
	reloc->type = GELF_R_TYPE(reloc->rela.r_info);
	reloc->addend = reloc->rela.r_addend;
	reloc->offset = reloc->rela.r_offset;
	*symndx = GELF_R_SYM(reloc->rela.r_info);
	return 0;
}

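/*
 * For every SHT_REL/SHT_RELA section, resolve the base section it applies to
 * (sh_info), read each entry, resolve its symbol by index, and register it
 * via elf_add_reloc().  When 'stats' is set (objtool's --stats option), the
 * largest per-section and total reloc counts are printed.
 */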
static int read_relocs(struct elf *elf)
{
	struct section *sec;
	struct reloc *reloc;
	int i;
	unsigned int symndx;
	unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;

	list_for_each_entry(sec, &elf->sections, list) {
		if ((sec->sh.sh_type != SHT_RELA) &&
		    (sec->sh.sh_type != SHT_REL))
			continue;

		sec->base = find_section_by_index(elf, sec->sh.sh_info);
		if (!sec->base) {
			WARN("can't find base section for reloc section %s",
			     sec->name);
			return -1;
		}

		sec->base->reloc = sec;

		nr_reloc = 0;
		for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) {
			reloc = malloc(sizeof(*reloc));
			if (!reloc) {
				perror("malloc");
				return -1;
			}
			memset(reloc, 0, sizeof(*reloc));
			switch (sec->sh.sh_type) {
			case SHT_REL:
				if (read_rel_reloc(sec, i, reloc, &symndx))
					return -1;
				break;
			case SHT_RELA:
				if (read_rela_reloc(sec, i, reloc, &symndx))
					return -1;
				break;
			default: return -1;
			}

			reloc->sec = sec;
			reloc->idx = i;
			reloc->sym = find_symbol_by_index(elf, symndx);
			if (!reloc->sym) {
				WARN("can't find reloc entry symbol %d for %s",
				     symndx, sec->name);
				return -1;
			}

			elf_add_reloc(elf, reloc);
			nr_reloc++;
		}
		max_reloc = max(max_reloc, nr_reloc);
		tot_reloc += nr_reloc;
	}

	if (stats) {
		printf("max_reloc: %lu\n", max_reloc);
		printf("tot_reloc: %lu\n", tot_reloc);
	}

	return 0;
}

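/*
 * Open an ELF object and build the in-memory view: sections, symbols and
 * relocations, all reachable through the lists and hash tables initialized
 * here.  A typical caller (sketch only, not taken verbatim from objtool)
 * would do:
 *
 *	struct elf *elf = elf_open_read(objname, O_RDWR);
 *	if (!elf)
 *		return 1;
 */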
struct elf *elf_open_read(const char *name, int flags)
{
	struct elf *elf;
	Elf_Cmd cmd;

	elf_version(EV_CURRENT);

	elf = malloc(sizeof(*elf));
	if (!elf) {
		perror("malloc");
		return NULL;
	}
	memset(elf, 0, offsetof(struct elf, sections));

	INIT_LIST_HEAD(&elf->sections);

	elf_hash_init(elf->symbol_hash);
	elf_hash_init(elf->symbol_name_hash);
	elf_hash_init(elf->section_hash);
	elf_hash_init(elf->section_name_hash);
	elf_hash_init(elf->reloc_hash);

	elf->fd = open(name, flags);
	if (elf->fd == -1) {
		fprintf(stderr, "objtool: Can't open '%s': %s\n",
			name, strerror(errno));
		goto err;
	}

	if ((flags & O_ACCMODE) == O_RDONLY)
		cmd = ELF_C_READ_MMAP;
	else if ((flags & O_ACCMODE) == O_RDWR)
		cmd = ELF_C_RDWR;
	else /* O_WRONLY */
		cmd = ELF_C_WRITE;

	elf->elf = elf_begin(elf->fd, cmd, NULL);
	if (!elf->elf) {
		WARN_ELF("elf_begin");
		goto err;
	}

	if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
		WARN_ELF("gelf_getehdr");
		goto err;
	}

	if (read_sections(elf))
		goto err;

	if (read_symbols(elf))
		goto err;

	if (read_relocs(elf))
		goto err;

	return elf;

err:
	elf_close(elf);
	return NULL;
}

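/*
 * Create a new SHT_PROGBITS section of nr * entsize bytes with the given
 * extra sh_flags (SHF_ALLOC is always set), register it in the section list
 * and hashes, and append its name to .shstrtab (or .strtab for objects that
 * lack .shstrtab, e.g. some Clang-built ones).
 */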
struct section *elf_create_section(struct elf *elf, const char *name,
				   unsigned int sh_flags, size_t entsize, int nr)
{
	struct section *sec, *shstrtab;
	size_t size = entsize * nr;
	Elf_Scn *s;
	Elf_Data *data;

	sec = malloc(sizeof(*sec));
	if (!sec) {
		perror("malloc");
		return NULL;
	}
	memset(sec, 0, sizeof(*sec));

	INIT_LIST_HEAD(&sec->symbol_list);
	INIT_LIST_HEAD(&sec->reloc_list);

	s = elf_newscn(elf->elf);
	if (!s) {
		WARN_ELF("elf_newscn");
		return NULL;
	}

	sec->name = strdup(name);
	if (!sec->name) {
		perror("strdup");
		return NULL;
	}

	sec->idx = elf_ndxscn(s);
	sec->len = size;
	sec->changed = true;

	sec->data = elf_newdata(s);
	if (!sec->data) {
		WARN_ELF("elf_newdata");
		return NULL;
	}

	sec->data->d_size = size;
	sec->data->d_align = 1;

	if (size) {
		sec->data->d_buf = malloc(size);
		if (!sec->data->d_buf) {
			perror("malloc");
			return NULL;
		}
		memset(sec->data->d_buf, 0, size);
	}

	if (!gelf_getshdr(s, &sec->sh)) {
		WARN_ELF("gelf_getshdr");
		return NULL;
	}

	sec->sh.sh_size = size;
	sec->sh.sh_entsize = entsize;
	sec->sh.sh_type = SHT_PROGBITS;
	sec->sh.sh_addralign = 1;
	sec->sh.sh_flags = SHF_ALLOC | sh_flags;

	/* Add section name to .shstrtab (or .strtab for Clang) */
	shstrtab = find_section_by_name(elf, ".shstrtab");
	if (!shstrtab)
		shstrtab = find_section_by_name(elf, ".strtab");
	if (!shstrtab) {
		WARN("can't find .shstrtab or .strtab section");
		return NULL;
	}

	s = elf_getscn(elf->elf, shstrtab->idx);
	if (!s) {
		WARN_ELF("elf_getscn");
		return NULL;
	}

	data = elf_newdata(s);
	if (!data) {
		WARN_ELF("elf_newdata");
		return NULL;
	}

	data->d_buf = sec->name;
	data->d_size = strlen(name) + 1;
	data->d_align = 1;

	sec->sh.sh_name = shstrtab->len;

	shstrtab->len += strlen(name) + 1;
	shstrtab->changed = true;

	list_add_tail(&sec->list, &elf->sections);
	elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
	elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));

	elf->changed = true;

	return sec;
}

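/*
 * Create an empty ".rel<base>" or ".rela<base>" section for @base, linked to
 * it via sh_info (and to .symtab via sh_link), so relocations added later can
 * be rebuilt and written out.
 */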
static struct section *elf_create_rel_reloc_section(struct elf *elf, struct section *base)
{
	char *relocname;
	struct section *sec;

	relocname = malloc(strlen(base->name) + strlen(".rel") + 1);
	if (!relocname) {
		perror("malloc");
		return NULL;
	}
	strcpy(relocname, ".rel");
	strcat(relocname, base->name);

	sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rel), 0);
	free(relocname);
	if (!sec)
		return NULL;

	base->reloc = sec;
	sec->base = base;

	sec->sh.sh_type = SHT_REL;
	sec->sh.sh_addralign = 8;
	sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
	sec->sh.sh_info = base->idx;
	sec->sh.sh_flags = SHF_INFO_LINK;

	return sec;
}

static struct section *elf_create_rela_reloc_section(struct elf *elf, struct section *base)
{
	char *relocname;
	struct section *sec;

	relocname = malloc(strlen(base->name) + strlen(".rela") + 1);
	if (!relocname) {
		perror("malloc");
		return NULL;
	}
	strcpy(relocname, ".rela");
	strcat(relocname, base->name);

	sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
	free(relocname);
	if (!sec)
		return NULL;

	base->reloc = sec;
	sec->base = base;

	sec->sh.sh_type = SHT_RELA;
	sec->sh.sh_addralign = 8;
	sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
	sec->sh.sh_info = base->idx;
	sec->sh.sh_flags = SHF_INFO_LINK;

	return sec;
}

struct section *elf_create_reloc_section(struct elf *elf,
					 struct section *base,
					 int reltype)
{
	switch (reltype) {
	case SHT_REL:  return elf_create_rel_reloc_section(elf, base);
	case SHT_RELA: return elf_create_rela_reloc_section(elf, base);
	default:       return NULL;
	}
}

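/*
 * The rebuild helpers serialize a section's in-memory reloc_list back into a
 * freshly allocated GElf_Rel/GElf_Rela array and update the section size, so
 * elf_write() can emit the result.
 */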
static int elf_rebuild_rel_reloc_section(struct section *sec, int nr)
{
	struct reloc *reloc;
	int idx = 0, size;
	GElf_Rel *relocs;

	/* Allocate a buffer for relocations */
	size = nr * sizeof(*relocs);
	relocs = malloc(size);
	if (!relocs) {
		perror("malloc");
		return -1;
	}

	sec->data->d_buf = relocs;
	sec->data->d_size = size;

	sec->sh.sh_size = size;

	idx = 0;
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		relocs[idx].r_offset = reloc->offset;
		relocs[idx].r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
		idx++;
	}

	return 0;
}

static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
{
	struct reloc *reloc;
	int idx = 0, size;
	GElf_Rela *relocs;

	/* Allocate a buffer for relocations with addends */
	size = nr * sizeof(*relocs);
	relocs = malloc(size);
	if (!relocs) {
		perror("malloc");
		return -1;
	}

	sec->data->d_buf = relocs;
	sec->data->d_size = size;

	sec->sh.sh_size = size;

	idx = 0;
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		relocs[idx].r_offset = reloc->offset;
		relocs[idx].r_addend = reloc->addend;
		relocs[idx].r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
		idx++;
	}

	return 0;
}

int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
{
	struct reloc *reloc;
	int nr;

	sec->changed = true;
	elf->changed = true;

	nr = 0;
	list_for_each_entry(reloc, &sec->reloc_list, list)
		nr++;

	switch (sec->sh.sh_type) {
	case SHT_REL:  return elf_rebuild_rel_reloc_section(sec, nr);
	case SHT_RELA: return elf_rebuild_rela_reloc_section(sec, nr);
	default:       return -1;
	}
}

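/*
 * Patch @len instruction bytes at @offset in @sec's data buffer and mark both
 * the data and the file dirty so the change is flushed by elf_write().
 */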
int elf_write_insn(struct elf *elf, struct section *sec,
		   unsigned long offset, unsigned int len,
		   const char *insn)
{
	Elf_Data *data = sec->data;

	if (data->d_type != ELF_T_BYTE || data->d_off) {
		WARN("write to unexpected data for section: %s", sec->name);
		return -1;
	}

	memcpy(data->d_buf + offset, insn, len);
	elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);

	elf->changed = true;

	return 0;
}

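/*
 * Write a single (possibly modified) relocation entry back into its reloc
 * section's data, using gelf_update_rel() or gelf_update_rela() depending on
 * the section type.
 */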
int elf_write_reloc(struct elf *elf, struct reloc *reloc)
{
	struct section *sec = reloc->sec;

	if (sec->sh.sh_type == SHT_REL) {
		reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
		reloc->rel.r_offset = reloc->offset;

		if (!gelf_update_rel(sec->data, reloc->idx, &reloc->rel)) {
			WARN_ELF("gelf_update_rel");
			return -1;
		}
	} else {
		reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
		reloc->rela.r_addend = reloc->addend;
		reloc->rela.r_offset = reloc->offset;

		if (!gelf_update_rela(sec->data, reloc->idx, &reloc->rela)) {
			WARN_ELF("gelf_update_rela");
			return -1;
		}
	}

	elf->changed = true;

	return 0;
}

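/*
 * Flush all pending changes: update the section header of every changed
 * section, then let libelf write the whole file with elf_update(ELF_C_WRITE).
 */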
int elf_write(struct elf *elf)
{
	struct section *sec;
	Elf_Scn *s;

	/* Update section headers for changed sections: */
	list_for_each_entry(sec, &elf->sections, list) {
		if (sec->changed) {
			s = elf_getscn(elf->elf, sec->idx);
			if (!s) {
				WARN_ELF("elf_getscn");
				return -1;
			}
			if (!gelf_update_shdr(s, &sec->sh)) {
				WARN_ELF("gelf_update_shdr");
				return -1;
			}

			sec->changed = false;
		}
	}

	/* Make sure the new section header entries get updated properly. */
	elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);

	/* Write all changes to the file. */
	if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
		WARN_ELF("elf_update");
		return -1;
	}

	elf->changed = false;

	return 0;
}

void elf_close(struct elf *elf)
{
	struct section *sec, *tmpsec;
	struct symbol *sym, *tmpsym;
	struct reloc *reloc, *tmpreloc;

	if (elf->elf)
		elf_end(elf->elf);

	if (elf->fd > 0)
		close(elf->fd);

	list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
		list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
			list_del(&sym->list);
			hash_del(&sym->hash);
			free(sym);
		}
		list_for_each_entry_safe(reloc, tmpreloc, &sec->reloc_list, list) {
			list_del(&reloc->list);
			hash_del(&reloc->hash);
			free(reloc);
		}
		list_del(&sec->list);
		free(sec);
	}

	free(elf);
}