kernel/livepatch/core.c
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
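
/*
 * For example (the function and version names here are illustrative only):
 * if two patches both replace meminfo_proc_show(), they share one klp_ops
 * and klp_ftrace_handler() always picks the most recently enabled klp_func:
 *
 *	ops->func_stack:  func_v2 (active) -> func_v1
 *
 * Disabling the newer patch just removes func_v2 from the stack, so func_v1
 * becomes active again without touching the ftrace registration.
 */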
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going-module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of the module coming and going
	 * notifiers. Note that the patch might still be needed before the
	 * going handler is called. Module functions can be called even in
	 * the GOING state until mod->exit() finishes. This is especially
	 * important for patches that modify the semantics of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set. If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the
	 * same name in the same object.
	 */
	unsigned long count;
};
static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};
	int ret;

	mutex_lock(&module_mutex);
	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
	mutex_unlock(&module_mutex);

	if (!ret) {
		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
			name, addr);
		return -EINVAL;
	}

	return 0;
}
static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* If KASLR has been enabled, adjust old_addr accordingly */
	if (kaslr_enabled() && func->old_addr)
		func->old_addr += kaslr_offset();
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}
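
/*
 * With CONFIG_RANDOMIZE_BASE, a vmlinux old_addr recorded at build time
 * (for example from System.map) is only valid once the boot-time
 * kaslr_offset() has been added to it, which is what the adjustment above
 * does before the address is verified against kallsyms.
 */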
/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
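
/*
 * klp_arch_set_pc() is the arch hook that actually performs the redirect.
 * On x86, for example, it simply rewrites the saved instruction pointer,
 * roughly:
 *
 *	static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 *	{
 *		regs->ip = ip;
 *	}
 *
 * so when ftrace returns, execution continues in func->new_func instead of
 * the original function.
 */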
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
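
/*
 * Stacking example: with patches registered in the order A, B, C, where A
 * and B are enabled and C is disabled, only C may be enabled next and only
 * B may be disabled next.  Enabling a patch whose predecessor is disabled,
 * or disabling a patch whose successor is still enabled, is rejected with
 * -EBUSY by the checks in __klp_enable_patch() and __klp_disable_patch().
 */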
/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
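
/*
 * For example, a patch can be turned off and back on from user space with:
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled
 *	echo 1 > /sys/kernel/livepatch/<patch>/enabled
 *
 * which ends up in enabled_store() below.
 */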
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};
static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s", func->old_name);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
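
/*
 * Typical usage from a patch module, sketched along the lines of
 * samples/livepatch/livepatch-sample.c (the patched function,
 * cmdline_proc_show, and the replacement below are illustrative only):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,		// NULL name means vmlinux
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */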
static int klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret) {
		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
			pmod->name, mod->name, ret);
		return ret;
	}

	if (patch->state == KLP_DISABLED)
		return 0;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (ret)
		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
			pmod->name, mod->name, ret);
	return ret;
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}
static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	int ret;
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know what module will get patched by a new patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				ret = klp_module_notify_coming(patch, obj);
				if (ret) {
					obj->mod = NULL;
					pr_warn("patch '%s' is in an inconsistent state!\n",
						patch->mod->name);
				}
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};
static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}
module_init(klp_init);