/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/* Registered patches */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
			sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
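
/*
 * Illustration (hypothetical names, not code from this file): if a patched
 * module "foo" contained two static functions named "show", sympos selects
 * which occurrence to patch. A lookup of the second one would look like:
 *
 *	unsigned long addr;
 *	int ret = klp_find_object_symbol("foo", "show", 2, &addr);
 *
 * With sympos == 0 the same lookup would fail with -EINVAL, because the
 * symbol is ambiguous.
 */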

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
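
/*
 * For illustration (hypothetical symbol name): a livepatch symbol named
 * ".klp.sym.vmlinux.printk,0" is parsed by klp_resolve_symbols() above
 * into objname = "vmlinux", symname = "printk" and sympos = 0, and is
 * then resolved through klp_find_object_symbol().
 */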

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/signal
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
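
/*
 * For example, from userspace (assuming a loaded patch module named
 * "livepatch_sample"):
 *
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * starts a transition that disables the patch, and writing 1 enables it
 * again or reverses an in-progress transition.
 */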
static int __klp_disable_patch(struct klp_patch *patch);
static int __klp_enable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * The module with the patch could have disappeared in the
		 * meantime, or the patch may not be properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_send_signals();

	mutex_unlock(&klp_mutex);

	return count;
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&signal_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_free_funcs(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		/* Might be called from klp_init_patch() error path. */
		if (func->kobj_added)
			kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_func = NULL;
}

static void klp_free_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		klp_free_funcs(obj);

		/* Might be called from klp_init_patch() error path. */
		if (obj->kobj_added)
			kobject_put(&obj->kobj);
	}
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it must be the
 * last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	if (patch->kobj_added) {
		kobject_put(&patch->kobj);
		wait_for_completion(&patch->finish);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	int ret;

	if (!func->old_name || !func->new_func)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
				   &obj->kobj, "%s,%lu", func->old_name,
				   func->old_sympos ? func->old_sympos : 1);
	if (!ret)
		func->kobj_added = true;

	return ret;
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;
	obj->kobj_added = true;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	patch->kobj_added = false;
	patch->enabled = false;
	patch->forced = false;
	init_completion(&patch->finish);

	klp_for_each_object(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		obj->kobj_added = false;

		klp_for_each_func(obj, func)
			func->kobj_added = false;
	}

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}
	patch->kobj_added = true;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch: Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch: Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
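
/*
 * A minimal sketch of the expected usage from a livepatch module, modeled
 * on the in-tree sample in samples/livepatch/. The function and symbol
 * names below are illustrative assumptions, not part of this file; a NULL
 * object name means vmlinux:
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */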

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch: The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (!patch->enabled) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch: The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	list_for_each_entry(patch, &klp_patches, list) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {

				if (patch != klp_transition_patch)
					klp_pre_unpatch_callback(obj);

				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);

				klp_post_unpatch_callback(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return an
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);