/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

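/* An object with a NULL name targets vmlinux; otherwise it names a module. */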
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

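	/* Match only symbols from the requested object: vmlinux iff objname is NULL */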
	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position,
	 * or when no position was given and the symbol turns out not to be
	 * unique.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
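		/* e.g. ".klp.sym.vmlinux.printk,0" (sympos 0 requires the symbol to be unique) */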
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
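		/* e.g. ".klp.rela.ext4.text.unlikely" holds relocations against module "ext4" */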
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/signal
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
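/*
 * Typical interaction from user space (a sketch; the exact semantics of
 * each attribute are implemented in the store/show callbacks below):
 *
 *   echo 0 > /sys/kernel/livepatch/<patch>/enabled   # disable or reverse
 *   echo 1 > /sys/kernel/livepatch/<patch>/signal    # signal blocking tasks
 *   echo 1 > /sys/kernel/livepatch/<patch>/force     # force the transition
 */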
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It might
	 * be necessary to complete the transition without forcing and
	 * breaking the system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_send_signals();

	mutex_unlock(&klp_mutex);

	return count;
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&signal_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

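/*
 * Dynamically allocated objects and 'nop' functions exist only for
 * atomic-replace patches. They are created via klp_add_nops() and freed
 * either through the kobject release handlers or via klp_free_*_dynamic().
 */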
static struct klp_object *klp_alloc_object_dynamic(const char *name)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&obj->func_list);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name);
		if (!obj)
			return -ENOMEM;

		list_add_tail(&obj->node, &patch->obj_list);
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;

		list_add_tail(&func->node, &obj->func_list);
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);

		/* Might be called from klp_init_patch() error path. */
		if (func->kobj_added) {
			kobject_put(&func->kobj);
		} else if (func->nop) {
			klp_free_func_nop(func);
		}
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);

		/* Might be called from klp_init_patch() error path. */
		if (obj->kobj_added) {
			kobject_put(&obj->kobj);
		} else if (obj->dynamic) {
			klp_free_object_dynamic(obj);
		}
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	if (patch->kobj_added) {
		kobject_put(&patch->kobj);
		wait_for_completion(&patch->finish);
	}

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work allows waiting until the interface is destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	int ret;

	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
				   &obj->kobj, "%s,%lu", func->old_name,
				   func->old_sympos ? func->old_sympos : 1);
	if (!ret)
		func->kobj_added = true;

	return ret;
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;
	obj->kobj_added = true;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

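/*
 * Set up the lists, flags, and completion of the patch and pin its module.
 * This early initialization is what makes it safe to run the klp_free_*()
 * functions on a partially initialized patch when klp_init_patch() or
 * __klp_enable_patch() fails later on.
 */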
static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	patch->kobj_added = false;
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		INIT_LIST_HEAD(&obj->func_list);
		obj->kobj_added = false;
		list_add_tail(&obj->node, &patch->obj_list);

		klp_for_each_func_static(obj, func) {
			func->kobj_added = false;
			list_add_tail(&func->node, &obj->func_list);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;
	patch->kobj_added = true;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	if (!patch->kobj_added)
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

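/*
 * A minimal usage sketch (assumed; modeled on samples/livepatch/): a patch
 * module fills in its klp_func, klp_object and klp_patch structures and
 * simply does "return klp_enable_patch(&patch);" from its module_init()
 * callback.
 */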
/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -EOPNOTSUPP;
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or stay in the
 * original code because of the special 'nop' function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patch modules on the system;
 * this is handled transparently because patch->forced prevents the final
 * module_put(), see klp_free_patch_finish().
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {

				if (patch != klp_transition_patch)
					klp_pre_unpatch_callback(obj);

				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);

				klp_post_unpatch_callback(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);