/*
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed
 * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
 *   file together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * object.
 */
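
/*
 * Illustrative sketch of the use_count rules above (not part of this file;
 * the traversal mirrors what kmemleak_scan() does further down):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(object, &object_list, object_list) {
 *		if (!get_object(object))	// already scheduled for freeing
 *			continue;
 *		// ... inspect the object ...
 *		put_object(object);		// frees via RCU when it hits 0
 *	}
 *	rcu_read_unlock();
 */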

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define REPORTS_NR		50	/* maximum number of reported leaks */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define MSECS_SCAN_YIELD	10	/* CPU yielding period */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* used for yielding the CPU to other tasks during scanning */
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
static unsigned long jiffies_min_age;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);

/* number of leaks reported (for limitation purposes) */
static int reported_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

/* early logging buffer and current position */
static struct early_log early_log[200];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

/*
 * Objects are considered referenced if their color is gray and they have not
 * been deleted.
 */
static int referenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
}
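
/*
 * Worked example of the coloring rules (illustration only): a kmalloc'd
 * block is created with min_count == 1 and count == -1 (no color). If a
 * scan finds no pointers to it, count becomes 0, the object turns white
 * (0 < 1) and, once older than jiffies_min_age, unreferenced_object()
 * returns true and the block is reported. If a later scan finds one
 * pointer, count == min_count makes it gray and referenced_object()
 * returns true instead.
 */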

/*
 * Printing of the (un)referenced objects information, either to the seq file
 * or to the kernel log. The print_referenced/print_unreferenced functions
 * must be called with the object->lock held.
 */
#define print_helper(seq, x...)	do {	\
	struct seq_file *s = (seq);	\
					\
	if (s)				\
		seq_printf(s, x);	\
	else				\
		pr_info(x);		\
} while (0)

static void print_referenced(struct kmemleak_object *object)
{
	pr_info("referenced object 0x%08lx (size %zu)\n",
		object->pointer, object->size);
}

static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
		     object->pointer, object->size);
	print_helper(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		     object->comm, object->pid, object->jiffies);
	print_helper(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		print_helper(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
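
/*
 * Typical usage of the helper above (sketch only, mirroring
 * make_gray_object() and friends below): look the object up, operate under
 * object->lock, then drop the reference taken by find_and_get_object():
 *
 *	unsigned long flags;
 *	struct kmemleak_object *object = find_and_get_object(ptr, 0);
 *
 *	if (!object)
 *		return;
 *	spin_lock_irqsave(&object->lock, flags);
 *	object->min_count = 0;		// e.g. mark as false positive
 *	spin_unlock_irqrestore(&object->lock, flags);
 *	put_object(object);
 */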

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void delete_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
		write_unlock_irqrestore(&kmemleak_lock, flags);
		return;
	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_REPORTED)
		print_referenced(object);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_stop("Early log buffer exceeded\n");
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
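
/*
 * Sketch of how an allocator outside slab/vmalloc might drive the two
 * callbacks above (illustration only; my_pool_alloc()/my_pool_free() and
 * the pool_* helpers are hypothetical):
 *
 *	void *my_pool_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = pool_grab(size);		// hypothetical
 *		// min_count == 1: report if no pointer to it is found
 *		kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 *
 *	void my_pool_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		pool_release(ptr);			// hypothetical
 *	}
 */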

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
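
/*
 * Typical (illustrative) use: an object whose only reference is stored in
 * a form the scanner cannot see, e.g. a physical address written to a
 * device register (base/DMA_ADDR/BUF_SIZE are hypothetical):
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	writel(virt_to_phys(buf), base + DMA_ADDR);	// no virtual pointer kept
 *	kmemleak_not_leak(buf);				// suppress false positive
 */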

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
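
/*
 * Illustrative use: a block that holds no pointers and whose lifetime is
 * managed elsewhere, so neither reporting nor scanning it is useful
 * ("template" is hypothetical):
 *
 *	name = kstrdup(template, GFP_KERNEL);
 *	kmemleak_ignore(name);		// black: never scanned or reported
 */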

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);
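
/*
 * Illustrative use: only the pointer-bearing part of a mixed block needs
 * scanning ("struct mixed" is hypothetical):
 *
 *	struct mixed {
 *		char buf[512];		// raw data, no pointers
 *		struct list_head list;	// the only pointers in the block
 *	};
 *
 *	m = kmalloc(sizeof(*m), GFP_KERNEL);
 *	kmemleak_scan_area(m, offsetof(struct mixed, list),
 *			   sizeof(struct list_head), GFP_KERNEL);
 */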

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
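
/*
 * Illustrative use: a buffer written by a device can contain values that
 * merely look like kernel pointers, which would keep other objects pinned
 * as referenced ("dma_buf"/DMA_BUF_SIZE are hypothetical):
 *
 *	dma_buf = kmalloc(DMA_BUF_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(dma_buf);	// still tracked and reported if leaked
 */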

/*
 * Yield the CPU so that other tasks get a chance to run. The yielding is
 * rate-limited to avoid excessive number of calls to the schedule() function
 * during memory scanning.
 */
static void scan_yield(void)
{
	might_sleep();

	if (time_is_before_eq_jiffies(next_scan_yield)) {
		schedule();
		next_scan_yield = jiffies + jiffies_scan_yield;
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (scan_should_stop())
			break;

		/*
		 * When scanning a memory block with a corresponding
		 * kmemleak_object, the CPU yielding is handled in the calling
		 * code since it holds the object->lock to avoid the block
		 * freeing.
		 */
		if (!scanned)
			scan_yield();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}
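
/*
 * Worked example of the range arithmetic above (illustration only, 64-bit
 * system, BYTES_PER_POINTER == 8): scanning 100 bytes at an unaligned
 * address,
 *
 *	_start = 0x1003, _end = 0x1067
 *	start  = PTR_ALIGN(0x1003, 8) = 0x1008
 *	end    = 0x1067 - 7 = 0x1060
 *	words dereferenced: 0x1008, 0x1010, ..., 0x1058 (11 words)
 *
 * so only naturally aligned words fully contained in the block are read
 * and tested as candidate pointers.
 */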

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
			   (void *)(object->pointer + object->size), object);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL);
	scan_block(__bss_start, __bss_stop, NULL);

	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL);

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE, NULL);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		scan_yield();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
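
/*
 * The loop above is a classic worklist traversal (sketch, in pseudo-C):
 *
 *	whiten all objects; seed gray_list with gray (referenced) objects;
 *	while (gray_list not empty) {
 *		obj = pop(gray_list);
 *		scan obj's memory; any white target whose count reaches
 *		min_count turns gray and is appended to gray_list;
 *	}
 *	// objects still white afterwards are leak candidates
 */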

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		struct kmemleak_object *object;
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);

		kmemleak_scan();
		reported_leaks = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list) {
			unsigned long flags;

			if (reported_leaks >= REPORTS_NR)
				break;
			spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_REPORTED) &&
			    unreferenced_object(object)) {
				print_unreferenced(NULL, object);
				object->flags |= OBJECT_REPORTED;
				reported_leaks++;
			} else if ((object->flags & OBJECT_REPORTED) &&
				   referenced_object(object)) {
				print_referenced(object);
				object->flags &= ~OBJECT_REPORTED;
			}
			spin_unlock_irqrestore(&object->lock, flags);
		}
		rcu_read_unlock();

		mutex_unlock(&scan_mutex);
		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the kmemleak_mutex held.
 */
void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;

	if (!n) {
		kmemleak_scan();
		reported_leaks = 0;
	}
	if (reported_leaks >= REPORTS_NR)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	rcu_read_unlock();
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
	if (reported_leaks >= REPORTS_NR)
		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}
	rcu_read_unlock();
out:
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		put_object(v);
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if (!unreferenced_object(object))
		goto out;
	print_unreferenced(seq, object);
	reported_leaks++;
out:
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	ret = mutex_lock_interruptible(&kmemleak_mutex);
	if (ret < 0)
		goto out;
	if (file->f_mode & FMODE_READ) {
		ret = mutex_lock_interruptible(&scan_mutex);
		if (ret < 0)
			goto kmemleak_unlock;
		ret = seq_open(file, &kmemleak_seq_ops);
		if (ret < 0)
			goto scan_unlock;
	}
	return ret;

scan_unlock:
	mutex_unlock(&scan_mutex);
kmemleak_unlock:
	mutex_unlock(&kmemleak_mutex);
out:
	return ret;
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		seq_release(inode, file);
		mutex_unlock(&scan_mutex);
	}
	mutex_unlock(&kmemleak_mutex);

	return 0;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
		int err;

		err = strict_strtoul(buf + 5, 0, &secs);
		if (err < 0)
			return err;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
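
/*
 * Example interaction with the debugfs interface above (from user space;
 * note that reading the file triggers a scan via kmemleak_seq_start()):
 *
 *	# mount -t debugfs nodev /sys/kernel/debug/
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo scan=off > /sys/kernel/debug/kmemleak
 *	# echo scan=300 > /sys/kernel/debug/kmemleak	(scan every 5 minutes)
 */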

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&kmemleak_mutex);
	stop_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	mutex_lock(&scan_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&kmemleak_mutex);
	start_scan_thread();
	mutex_unlock(&kmemleak_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);