/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
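
/*
 * Illustrative layout (not a literal dump): each patched old_addr gets one
 * klp_ops entry on the global klp_ops list, and stacked patches of the same
 * function pile up on that entry's func_stack:
 *
 *	klp_ops list
 *	  +-> klp_ops for old_addr A: fops, func_stack: newest -> ... -> oldest
 *	  +-> klp_ops for old_addr B: fops, func_stack: newest -> ... -> oldest
 */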

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

/* TODO: temporary stub */
void klp_update_patch_state(struct task_struct *task) {}

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname ? objname : "vmlinux");
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
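
/*
 * Illustrative note: if two static functions with the same name exist in the
 * patched object, old_sympos = 1 selects the first kallsyms occurrence and
 * old_sympos = 2 the second; old_sympos = 0 would fail above because the
 * symbol is not unique.
 */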

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
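
/*
 * Illustrative note: a livepatch symbol referencing the (unique) vmlinux
 * symbol "printk" would carry the name ".klp.sym.vmlinux.printk,0" in the
 * patch module's string table; klp_resolve_symbols() parses that name and
 * patches the resolved address into st_value.
 */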

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
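
/*
 * Illustrative note: a klp relocation section aimed at the "ext4" module
 * might be named ".klp.rela.ext4.text.unlikely". Sections whose sec_objname
 * does not match the object being patched are skipped above and processed
 * later, when their own object loads.
 */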

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
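
/*
 * How the redirection works, in short: ftrace traps entry to the patched
 * function (ip), this handler picks the newest klp_func on the func_stack,
 * and klp_arch_set_pc() rewrites the saved instruction pointer in pt_regs,
 * so returning from the handler resumes execution in new_func instead of
 * the original function body.
 */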

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
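
/*
 * Illustrative note: powerpc64 is one such architecture; it overrides
 * klp_get_ftrace_location because the ftrace call site sits a few
 * instructions past the function's global entry point, so the filter must
 * target that location rather than faddr itself.
 */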

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

static int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
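
/*
 * Note: patching an object is all-or-nothing. If klp_patch_func() fails for
 * any function, klp_patch_object() reverts the functions patched so far via
 * klp_unpatch_object() before returning the error.
 */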

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->patched)
			klp_unpatch_object(obj);
	}

	patch->enabled = false;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (!patch->enabled) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_patch_object(obj);
		if (ret)
			goto unregister;
	}

	patch->enabled = true;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
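
/*
 * Illustrative usage from userspace (the patch name is hypothetical):
 *
 *	# disable a loaded patch
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# re-enable it
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 */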

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == val) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
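
/*
 * Illustrative note (function name hypothetical): patching a unique function
 * "meminfo_proc_show" yields the sysfs directory "meminfo_proc_show,1"; the
 * second occurrence of a non-unique symbol would appear as "<function>,2".
 */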

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->enabled) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (!patch->enabled)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->enabled) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);