/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
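
/*
 * Illustration (not code): if two enabled patches both replace the same
 * function, they share one klp_ops keyed on old_addr and their klp_funcs are
 * stacked on func_stack, most recently enabled first.  The symbol name below
 * is purely hypothetical:
 *
 *	klp_ops for old_addr == <address of meminfo_proc_show>
 *		.func_stack: patch2's klp_func -> patch1's klp_func
 *		.fops:       the single ftrace_ops filtered on old_addr
 *
 * klp_ftrace_handler() always redirects to the klp_func at the front of the
 * list, so enabling or disabling a patch only changes list membership.
 */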

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of the module coming and going
	 * notifiers. Note that the patch might still be needed before the
	 * going handler is called. Module functions can be called even in
	 * the GOING state until mod->exit() finishes. This is especially
	 * important for patches that modify the semantics of the patched
	 * functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set. If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the same
	 * name in the same object.
	 */
	unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	kallsyms_on_each_symbol(klp_find_callback, &args);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};

	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
		return 0;

	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
	       name, addr);
	return -EINVAL;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* If KASLR has been enabled, adjust old_addr accordingly */
	if (kaslr_enabled() && func->old_addr)
		func->old_addr += kaslr_offset();
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}
		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}
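
/*
 * Sketch of the input this function expects, assuming the struct klp_reloc
 * fields implied by the accesses above (loc, val, type, name, addend,
 * external); the symbol name, location and relocation type below are
 * hypothetical examples only:
 *
 *	static struct klp_reloc relocs[] = {
 *		{
 *			.loc      = (unsigned long)&some_callsite_in_new_func,
 *			.name     = "some_unexported_symbol",
 *			.type     = R_X86_64_64,
 *			.addend   = 0,
 *			.external = 0,
 *		},
 *		{ }
 *	};
 *
 * In practice such tables are typically generated by patch-building tooling
 * rather than written by hand.
 */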

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
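
/*
 * klp_arch_set_pc() is the arch-provided hook that performs the actual
 * redirection.  On x86, for example, it is expected to boil down to the
 * sketch below (see the arch's asm/livepatch.h; shown here for illustration
 * only):
 *
 *	static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 *	{
 *		regs->ip = ip;
 *	}
 *
 * Because the ftrace_ops is registered with FTRACE_OPS_FL_SAVE_REGS, the
 * handler receives the saved pt_regs, and rewriting the program counter
 * makes the traced call resume in func->new_func instead of the original
 * function body.
 */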

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	WARN_ON(func->state != KLP_ENABLED);
	WARN_ON(!func->old_addr);

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name; func++)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
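
/*
 * Typical usage from a patch module, loosely following the in-tree sample
 * (samples/livepatch/livepatch-sample.c).  The replacement function and the
 * patched symbol below are hypothetical; leaving klp_object.name NULL means
 * the object is vmlinux:
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret)
 *			WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 *	module_init(livepatch_init);
 */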

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
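
/*
 * For example, a registered patch can be toggled from user space (the patch
 * name "livepatch_sample" is hypothetical; in practice it is the patch
 * module's name):
 *
 *	# disable the patch
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# re-enable it
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 */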

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	for (func = obj->funcs; func->old_name; func++)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	obj->kobj = kobject_create_and_add(name, &patch->kobj);
	if (!obj->kobj)
		return -ENOMEM;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	for (obj = patch->objs; obj->funcs; obj++) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);

static void klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret)
		goto err;

	if (patch->state == KLP_DISABLED)
		return;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (!ret)
		return;

err:
	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
		pmod->name, mod->name, ret);
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know what module will get patched by a new patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		for (obj = patch->objs; obj->funcs; obj++) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				klp_module_notify_coming(patch, obj);
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);