/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"
static LIST_HEAD(klp_ops);
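/*
 * Look up the klp_ops struct for a given function address.  All klp_func
 * structs on an ops' func_stack share the same old_addr, so checking the
 * first entry is sufficient.
 */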
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
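/*
 * The ftrace handler for patched functions: redirect execution to the newest
 * klp_func on the ops' func_stack by rewriting the saved instruction pointer,
 * taking the per-task patch state into account while a transition is in
 * progress.
 */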
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	preempt_enable_notrace();
}
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
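/*
 * Remove a func from its ops' func_stack.  If it was the last func on the
 * stack, also unregister the ftrace handler and free the ops struct.
 */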
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
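/*
 * Redirect a function to its new implementation.  The first patch of a
 * function allocates a klp_ops struct and registers an ftrace handler at the
 * function's ftrace location; later patches of the same function just push
 * their klp_func onto the existing func_stack.
 */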
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
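/* Unpatch all patched functions of an object (vmlinux or a module). */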
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}
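/*
 * Patch all functions of an object, rolling the whole object back on the
 * first failure.
 */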
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}
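/* Unpatch every patched object in a patch. */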
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}