kernel/tracepoint.c (linux-2.6/mini2440.git)
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>

extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
/*
 * Note about RCU: it is used to delay the freeing of old probe arrays until
 * a quiescent state is reached.
 * Tracepoint entry modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;
	void **funcs;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
		struct rcu_head rcu;
		struct list_head list;
	} u;
	void *probes[0];
};

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(count * sizeof(void *)
			+ sizeof(struct tp_probes), GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, u.rcu));
}

static inline void release_probes(void *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
	}
}
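
/*
 * Layout sketch of the allocation handled above (illustrative): each probe
 * array is allocated as a struct tp_probes header immediately followed by a
 * NULL-terminated array of probe function pointers, e.g. for two probes:
 *
 *	+----------------------+
 *	| struct tp_probes  u  |  <- rcu head / list head for deferred free
 *	+----------------------+
 *	| probes[0] = probe_a  |  <- pointer returned by allocate_probes()
 *	| probes[1] = probe_b  |
 *	| probes[2] = NULL     |
 *	+----------------------+
 *
 * release_probes() recovers the header from the probes[] pointer with
 * container_of() and defers the kfree() through call_rcu_sched().
 */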
static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->funcs)
		return;

	for (i = 0; entry->funcs[i]; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
}

static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
{
	int nr_probes = 0;
	void **old, **new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes]; nr_probes++)
			if (old[nr_probes] == probe)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(void *));
	new[nr_probes] = probe;
	new[nr_probes + 1] = NULL;
	entry->refcount = nr_probes + 1;
	entry->funcs = new;
	debug_print_probes(entry);
	return old;
}

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
{
	int nr_probes = 0, nr_del = 0, i;
	void **old, **new;

	old = entry->funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	for (nr_probes = 0; old[nr_probes]; nr_probes++) {
		if ((!probe || old[nr_probes] == probe))
			nr_del++;
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->funcs = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i]; i++)
			if ((probe && old[i] != probe))
				new[j++] = old[i];
		new[nr_probes - nr_del] = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->funcs = new;
	}
	debug_print_probes(entry);
	return old;
}
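
/*
 * Example of the array transitions handled above (illustrative, not
 * exhaustive): with funcs = {A, B, C, NULL}, adding D produces
 * {A, B, C, D, NULL} and removing B produces {A, C, NULL}. Passing a NULL
 * probe to tracepoint_entry_remove_probe() matches every probe and drops the
 * whole array, leaving the entry disarmed (refcount == 0).
 */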
/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len - 1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			printk(KERN_NOTICE
				"tracepoint %s busy\n", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using kmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->funcs = NULL;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}
/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	kfree(e);
}
/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
	struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
	elem->state = active;
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting an RCU grace period after clearing elem->funcs ensures
 * that the original callback is not used anymore. This is guaranteed by the
 * preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	elem->state = 0;
	rcu_assign_pointer(elem->funcs, NULL);
}
/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 */
void tracepoint_update_probe_range(struct tracepoint *begin,
	struct tracepoint *end)
{
	struct tracepoint *iter;
	struct tracepoint_entry *mark_entry;

	mutex_lock(&tracepoints_mutex);
	for (iter = begin; iter < end; iter++) {
		mark_entry = get_tracepoint(iter->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(iter);
		}
	}
	mutex_unlock(&tracepoints_mutex);
}

/*
 * Update probes, removing the faulty probes.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
	tracepoint_update_probe_range(__start___tracepoints,
		__stop___tracepoints);
	/* tracepoints in modules. */
	module_update_tracepoints();
}

static void *tracepoint_add_probe(const char *name, void *probe)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return entry;
	}
	old = tracepoint_entry_add_probe(entry, probe);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe)
{
	void *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe);
	mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
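
/*
 * Usage sketch (illustrative only; the tracepoint name "my_event" and the
 * probe prototype below are hypothetical). A probe must match the TP_PROTO
 * of the tracepoint it attaches to, and the same (name, probe) pair is later
 * handed to tracepoint_probe_unregister():
 *
 *	static void my_probe(int value);	(called on each tracepoint hit)
 *
 *	ret = tracepoint_probe_register("my_event", my_probe);
 *	...
 *	tracepoint_probe_unregister("my_event", my_probe);
 */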
static void *tracepoint_remove_probe(const char *name, void *probe)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}
/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * We do not need to call synchronize_sched() to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe)
{
	void *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe);
	mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but not connect
 * @name: tracepoint name
 * @probe: probe handler
 *
 * caller must call tracepoint_probe_update_all()
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe)
{
	void *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * caller must call tracepoint_probe_update_all()
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
{
	void *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	mutex_unlock(&tracepoints_mutex);

	tracepoint_update_probes();
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
	}
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
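
/*
 * Batch-usage sketch (illustrative only; the names are hypothetical): the
 * _noupdate variants only edit the hash table, so several probes can be
 * queued and the tracepoint call sites are then updated once by
 * tracepoint_probe_update_all(), which also frees the replaced probe arrays
 * after an RCU-sched grace period:
 *
 *	tracepoint_probe_register_noupdate("my_event_a", probe_a);
 *	tracepoint_probe_register_noupdate("my_event_b", probe_b);
 *	tracepoint_probe_update_all();	(connects both probes)
 */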
/**
 * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
int tracepoint_get_iter_range(struct tracepoint **tracepoint,
	struct tracepoint *begin, struct tracepoint *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);

static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

	/* Core kernel tracepoints */
	if (!iter->module) {
		found = tracepoint_get_iter_range(&iter->tracepoint,
				__start___tracepoints, __stop___tracepoints);
		if (found)
			goto end;
	}
	/* tracepoints in modules. */
	found = module_get_iter_tracepoints(iter);
end:
	if (!found)
		tracepoint_iter_reset(iter);
}

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by marshalling on the tracepoints, getting the
	 * tracepoints from following modules if necessary.
	 */
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
	iter->module = NULL;
	iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
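
/*
 * Iterator-usage sketch (illustrative only): walk every tracepoint known to
 * the core kernel and to loaded modules. The iterator is exhausted when
 * tracepoint_get_iter() finds nothing and resets iter.tracepoint to NULL:
 *
 *	struct tracepoint_iter iter;
 *
 *	tracepoint_iter_reset(&iter);
 *	tracepoint_iter_start(&iter);
 *	while (iter.tracepoint) {
 *		(visit iter.tracepoint->name)
 *		tracepoint_iter_next(&iter);
 *	}
 *	tracepoint_iter_stop(&iter);
 */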
#ifdef CONFIG_MODULES

int tracepoint_module_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		tracepoint_update_probe_range(mod->tracepoints,
			mod->tracepoints + mod->num_tracepoints);
		break;
	case MODULE_STATE_GOING:
		tracepoint_update_probe_range(mod->tracepoints,
			mod->tracepoints + mod->num_tracepoints);
		break;
	}
	return 0;
}

struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static int init_tracepoints(void)
{
	return register_module_notifier(&tracepoint_module_nb);
}

__initcall(init_tracepoints);

#endif /* CONFIG_MODULES */