/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
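
/*
 * An illustrative sketch of the reference counting pattern described above,
 * using find_and_get_object() and put_object() defined later in this file
 * (the helpers themselves follow exactly this discipline):
 *
 *	rcu_read_lock();
 *	object = find_and_get_object(ptr, 0);	(use_count++ if still live)
 *	rcu_read_unlock();
 *	if (object) {
 *		... inspect the metadata under object->lock ...
 *		put_object(object);	(may schedule the RCU freeing)
 *	}
 */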

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
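/*
 * A worked example of the encoding (illustrative values only): an object
 * allocated with min_count == 1 is whitened to count == 0 at the start of a
 * scan. If the sweep finds one pointer to it, count becomes 1 >= min_count,
 * the object turns gray and is queued to be scanned in turn; if no pointer
 * is found it stays white and is eventually reported as unreferenced. An
 * object passed to kmemleak_ignore() has min_count == -1 and is therefore
 * black: never scanned, never reported.
 */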
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing is already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;		/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Freeing unknown object at 0x%08lx\n", ptr);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored during scanning and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
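
/*
 * A minimal usage sketch (hypothetical code, not from this file): a driver
 * that hands out sub-blocks of a larger buffer kmemleak cannot see through
 * may pair these callbacks manually; pool_carve() and pool_release() are
 * made-up helpers standing in for such a private allocator:
 *
 *	void *obj = pool_carve(pool, size);
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);	(min_count 1: report if orphaned)
 *	...
 *	kmemleak_free(obj);				(before returning it to the pool)
 *	pool_release(pool, obj);
 */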

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
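
/*
 * An annotation sketch (hypothetical scenario): a block whose only remaining
 * reference survives in a form the scanner cannot follow, e.g. a physical
 * address programmed into a device register, would otherwise be reported as
 * a leak and can be grayed instead (REG_DMA_ADDR is a made-up register):
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	writel(virt_to_phys(obj), base + REG_DMA_ADDR);
 *	kmemleak_not_leak(obj);		(never reported, still scanned)
 *
 * kmemleak_ignore() is the stronger variant for blocks that additionally
 * contain no pointers worth following (neither reported nor scanned).
 */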

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid an excessive number of calls to the schedule()
 * function during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * freeing.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. When the pos
 * argument points to the first position, the reported_leaks counter is also
 * reset.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n)
		reported_leaks = 0;
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
		print_unreferenced(seq, object);
		reported_leaks++;
	}
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	mutex_unlock(&scan_mutex);

	return ret;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 */
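
/*
 * For example, from a shell, with debugfs mounted at /sys/kernel/debug:
 *
 *	echo scan=60 > /sys/kernel/debug/kmemleak	(scan every 60 seconds)
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger one scan now)
 *	cat /sys/kernel/debug/kmemleak			(read the leak reports)
 */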
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
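
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the leak detector before any allocation tracking starts.
 */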

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);