kernel/jump_label.c
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
static void jump_label_update(struct static_key *key, int enable);

void static_key_slow_inc(struct static_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_ENABLE);
		else
			jump_label_update(key, JUMP_LABEL_DISABLE);
	}
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
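/*
 * Illustrative usage sketch (assumed caller code, not defined in this file):
 * a user typically declares a key with one of the initializers from
 * <linux/static_key.h> and flips it with the slow-path helpers above, e.g.:
 *
 *	static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *	if (static_key_false(&my_key))	// unlikely branch, patched at runtime
 *		do_rare_thing();
 *
 *	static_key_slow_inc(&my_key);	// enable: patch NOP -> jump
 *	static_key_slow_dec(&my_key);	// disable again when the count drops to zero
 *
 * "my_key" and "do_rare_thing()" are made-up names used only for illustration.
 */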
static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_DISABLE);
		else
			jump_label_update(key, JUMP_LABEL_ENABLE);
	}
	jump_label_unlock();
}
static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);

	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
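/*
 * Illustrative sketch of the rate-limited variant (assumed caller code, not
 * defined in this file): wrapping a key in struct static_key_deferred and
 * calling jump_label_rate_limit() batches up disables, so rapid inc/dec
 * cycles do not patch code on every transition, e.g.:
 *
 *	static struct static_key_deferred my_dkey;	// hypothetical name
 *
 *	jump_label_rate_limit(&my_dkey, HZ);		// defer disables by ~1s
 *	static_key_slow_inc(&my_dkey.key);
 *	static_key_slow_dec_deferred(&my_dkey);		// queued, not immediate
 */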
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
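/*
 * The arch_jump_label_transform() hook used as the fallback here lives in
 * the arch-specific jump label code (outside this file); it rewrites the
 * instruction at entry->code between a NOP and a jump to entry->target
 * according to the requested jump_label_type. An arch whose early patching
 * needs no heavyweight synchronization can override the weak stub above
 * with a cheaper variant.
 */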
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * An entry->code of 0 marks an invalidated module init text
		 * section; kernel_text_address() verifies we are not in core
		 * kernel init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}
static enum jump_label_type jump_label_type(struct static_key *key)
{
	bool true_branch = jump_label_get_branch_default(key);
	bool state = static_key_enabled(key);

	if ((!true_branch && state) || (true_branch && !state))
		return JUMP_LABEL_ENABLE;

	return JUMP_LABEL_DISABLE;
}
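/*
 * Put differently: a site needs the jump patched in exactly when the key's
 * current enabled state differs from its compile-time default branch, i.e.
 * JUMP_LABEL_ENABLE when (true_branch XOR state) is true.
 */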
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_type(iterk));
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}
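/*
 * __start___jump_table and __stop___jump_table bound the __jump_table
 * section that the linker script collects from every static key branch
 * site, so the loop above visits one struct jump_entry per site in the
 * core kernel image.
 */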
#ifdef CONFIG_MODULES

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}
static void __jump_label_mod_update(struct static_key *key, int enable)
{
	struct static_key_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries,
				    enable);
		mod = mod->next;
	}
}
/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch its jump label sites with arch_get_jump_label_nop(), which
 * is supplied by the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
	}
}
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		if (iterk == key)
			continue;

		key = iterk;
		if (__module_address(iter->key) == mod) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
	}

	return 0;
}
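/*
 * Two cases above: a key defined inside the module itself only gets its
 * ->entries pointer seeded, while a key owned by the core kernel or by
 * another module gets a struct static_key_mod link so later updates of
 * that key can also reach this module's branch sites.
 */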
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct static_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod)
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}
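/*
 * Once a module reaches MODULE_STATE_LIVE its init text is freed, so the
 * entries pointing into it are zeroed here and are skipped from then on by
 * the entry->code check in __jump_label_update().
 */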
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}
struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
static void jump_label_update(struct static_key *key, int enable)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((unsigned long)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif