/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
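
/*
 * Illustrative sketch of the reference counting rules described above; this
 * is the pattern implemented by find_and_get_object() further below:
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))
 *		object = NULL;	(use_count is 0, RCU freeing already queued)
 *	rcu_read_unlock();
 */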

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
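
/*
 * For example (illustrative), an internal metadata allocation made on behalf
 * of a caller passes only the caller's GFP_KERNEL/GFP_ATOMIC bits through,
 * dropping any other modifier flags:
 *
 *	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
 */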

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;
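
/*
 * Illustrative flow (see the kmemleak_* callbacks below): an allocation made
 * before kmemleak_init() has run is recorded with
 *
 *	log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 *
 * and later replayed from the early_log buffer by kmemleak_init().
 */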

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects start with no references found yet (object->count
 * == 0), so tracked objects (min_count > 0) are white until a memory scan
 * finds enough pointers to them.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
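
/*
 * Concrete examples of the encoding (count, min_count): (0, 1) is white
 * after a scan that found no pointers to the block; any count >= min_count
 * is gray; min_count == 0 marks a false positive that is always gray;
 * min_count == -1 (KMEMLEAK_BLACK) marks a block that is never reported.
 */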

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
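
/*
 * Putting the above together, a report in /sys/kernel/debug/kmemleak looks
 * like this (illustrative values):
 *
 *	unreferenced object 0xc8a2e000 (size 64):
 *	  comm "insmod", pid 1545, jiffies 4294877822 (age 13.696s)
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace:
 *	    [<...>] kmem_cache_alloc+...
 *	    ...
 */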

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded, "
			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
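
/*
 * Illustrative pairing (hypothetical driver code, not part of this file):
 *
 *	obj = my_pool_alloc(pool, size);		(custom allocator)
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);	(start tracking)
 *	...
 *	kmemleak_free(obj);				(stop tracking)
 *	my_pool_free(pool, obj);
 *
 * min_count == 1 means the block is reported if no pointer to it is found.
 */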

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Partial memory freeing function callback. This function is usually called
 * from the bootmem allocator when (part of) a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
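
/*
 * Illustrative summary of the annotation callbacks above:
 *
 *	kmemleak_not_leak(ptr);			false positive: never report,
 *						keep scanning the block
 *	kmemleak_ignore(ptr);			never report and do not scan
 *	kmemleak_scan_area(ptr, size, gfp);	scan only the given range
 *	kmemleak_no_scan(ptr);			keep tracking the block but do
 *						not scan its contents
 */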

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to stop the automatic scanning)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them
 *   dump=...	- dump information about the object found at the given address
 */
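
/*
 * Example usage from user space (illustrative):
 *
 *	# echo scan=60 > /sys/kernel/debug/kmemleak	(scan every 60 seconds)
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a single scan)
 *	# echo clear > /sys/kernel/debug/kmemleak	(ignore current reports)
 *	# cat /sys/kernel/debug/kmemleak		(list suspected leaks)
 */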

static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
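
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the leak detector before any tracking starts.
 */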

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);