/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the
 * klp_ops struct's func_stack list.  The winner is the klp_func at the top
 * of the func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

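/*
 * Look up the klp_ops that handles a given old_addr.  Checking only the
 * func at the top of each ops' func_stack is sufficient because every
 * klp_func on a given stack shares the same old_addr.
 */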
static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

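/*
 * A klp_object with a NULL name patches vmlinux; otherwise the name is the
 * name of the module being patched.
 */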
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here.  The patches are removed by
	 * a going-module notifier instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of the module coming and going
	 * notifiers.  Note that the patch might still be needed before the
	 * going notifier is called.  Module functions can be called even in
	 * the GOING state until mod->exit() finishes.  This is especially
	 * important for patches that modify the semantics of the patched
	 * functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or when no position was given and the symbol turns out not to be
	 * unique.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found.  If sympos is 0, ensure the symbol is
	 * unique; otherwise ensure the occurrence count matches sympos (e.g.
	 * sympos == 2 selects the second of several identically named
	 * symbols).
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};
	int ret;

	mutex_lock(&module_mutex);
	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
	mutex_unlock(&module_mutex);

	if (!ret) {
		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
		       name, addr);
		return -EINVAL;
	}

	return 0;
}

/*
 * External symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/*
	 * Check if it's in another .o within the patch module.  This also
	 * checks that the external symbol is unique.
	 */
	return klp_find_object_symbol(pmod->name, name, 0, addr);
}

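/*
 * Resolve and apply the relocation entries recorded for one patched object:
 * vmlinux addresses recorded at build time are verified (and adjusted for
 * KASLR), while module addresses must be looked up now, at patch time.
 */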
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {

#if defined(CONFIG_RANDOMIZE_BASE)
			/* If KASLR has been enabled, adjust old value accordingly */
			if (kaslr_enabled())
				reloc->val += kaslr_offset();
#endif
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     0, &reloc->val);
			if (ret)
				return ret;
		}
		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}

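/*
 * The ftrace handler is where the actual redirection happens: it picks the
 * active (top of stack) klp_func for this old_addr under RCU and rewrites
 * the saved instruction pointer, so the traced call resumes in the new
 * function instead of the old one.  It must be notrace to avoid recursive
 * tracing.
 */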
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

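/*
 * Pop func off its func_stack; when it is the last one, the ftrace ops is
 * unregistered and freed as well.
 */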
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

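/*
 * Push func onto the stack for its old_addr, making it the active version.
 * The first func for a given old_addr also allocates the shared klp_ops and
 * registers the ftrace handler on that address.
 */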
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */

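/*
 * For example, an administrator can toggle a loaded patch from userspace
 * via the "enabled" attribute (the patch module name "livepatch_sample"
 * below is just an illustration):
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 */
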
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit.  When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit.  When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
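
/*
 * A minimal sketch of the calling sequence expected from a patch module
 * (all identifiers below are illustrative; see samples/livepatch/ for a
 * complete, buildable example):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = NULL,	(a NULL name means patch vmlinux)
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret)
 *			WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 */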

static int klp_module_notify_coming(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret) {
		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
			pmod->name, mod->name, ret);
		return ret;
	}

	if (patch->state == KLP_DISABLED)
		return 0;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (ret)
		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
			pmod->name, mod->name, ret);
	return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	int ret;
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know in advance which module a future patch will target.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				ret = klp_module_notify_coming(patch, obj);
				if (ret) {
					obj->mod = NULL;
					pr_warn("patch '%s' is in an inconsistent state!\n",
						patch->mod->name);
				}
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);