/*
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * object.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
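
/*
 * Editorial note (illustrative, not from the original source): since
 * A & (B | A) == A, passing one of the two base masks through the macro
 * leaves it unchanged and only adds the damping flags, e.g.
 *
 *	gfp_kmemleak_mask(GFP_ATOMIC)
 *	    == GFP_ATOMIC | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * so an internal metadata allocation inherits the caller's context (atomic
 * vs. sleeping) while failing quickly and quietly under memory pressure.
 */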

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a kmemleak warning was issued */
static atomic_t kmemleak_warning = ATOMIC_INIT(0);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	atomic_set(&kmemleak_warning, 1);	\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
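
/*
 * Worked example of the color encoding above (illustrative values, not from
 * the original source): an object registered with min_count == 1 and
 * count == 0 is white (count < min_count), i.e. a leak candidate. Once a
 * scan finds a pointer to it, count reaches min_count and color_gray()
 * becomes true, so the block itself is scanned for further references. An
 * object registered with min_count == 0 is gray from the start, and one
 * painted with KMEMLEAK_BLACK (min_count == -1) is neither white nor gray.
 */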

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %d\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
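
/*
 * Illustrative sketch of the intended get/put pattern (not a function from
 * this file): lockless users must bracket the lookup and the use_count
 * increment with rcu_read_lock() so that a concurrent put_object() cannot
 * free the metadata under them:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(kmemleak_lock also needed)
 *	if (object && !get_object(object))
 *		object = NULL;			(use_count already reached 0)
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);	(may schedule free_object_rcu())
 */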

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
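
/*
 * Example of the alias semantics (illustrative, hypothetical addresses): for
 * a 64-byte block registered at 0x1000,
 *
 *	find_and_get_object(0x1000, 0);		matches the block start
 *	find_and_get_object(0x1020, 0);		warns and returns NULL
 *	find_and_get_object(0x1020, 1);		matches via the inner pointer
 *
 * The scanner passes alias == 1 because any pointer into a block keeps it
 * referenced; kmemleak_free() passes 0 since whole blocks are freed by their
 * start address.
 */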

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (atomic_read(&kmemleak_error)) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
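
/*
 * Illustrative caller (a sketch, not part of this file; my_pool_alloc() and
 * carve_from() are hypothetical): a subsystem with a private allocator would
 * register each carved-out object so that kmemleak can track it:
 *
 *	void *my_pool_alloc(struct my_pool *pool, size_t size, gfp_t gfp)
 *	{
 *		void *obj = carve_from(pool, size);
 *
 *		if (obj)
 *			kmemleak_alloc(obj, size, 1, gfp);
 *		return obj;
 *	}
 *
 * With min_count == 1 the object is reported if a scan finds no pointer to
 * it; the matching free path would call kmemleak_free(obj).
 */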

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
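
/*
 * Worked example (illustrative, hypothetical addresses): for a block
 * registered as [0x1000, 0x1100), kmemleak_free_part((void *)0x1000, 0x40)
 * deletes the old metadata and delete_object_part() re-creates an object
 * covering the remaining [0x1040, 0x1100) range with the same min_count.
 */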

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
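
/*
 * Illustrative use of the annotations above (a sketch; the structures and
 * fields are hypothetical):
 *
 *	dev->buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	kmemleak_not_leak(dev->buf);	(reachable only via device registers)
 *
 *	tbl = kmalloc(TBL_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(tbl);		(holds no pointers to kernel objects)
 *
 * kmemleak_not_leak() keeps scanning the block since it may reference other
 * objects; kmemleak_ignore() and kmemleak_no_scan() stop scanning it.
 */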

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}
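
/*
 * Editorial note (not from the original source): the crc32 gives the scanner
 * a cheap "was this block modified since the last pass?" test. A white
 * object whose checksum changed may have just been filled with pointers to
 * otherwise-unreferenced blocks, so kmemleak_scan() temporarily grays it and
 * scans its contents again instead of trusting the previous pass.
 */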

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}
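
/*
 * Worked example of the boundary handling above (illustrative addresses,
 * 8-byte pointers): for scan_block((void *)0x1003, (void *)0x101c, ...),
 *
 *	start = PTR_ALIGN(0x1003, 8) = 0x1008
 *	end   = 0x101c - 7           = 0x1015
 *
 * so only the aligned words at 0x1008 and 0x1010 are read; a word at 0x1018
 * would extend past _end, which is exactly what the
 * "- (BYTES_PER_POINTER - 1)" adjustment rules out.
 */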

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	lock_memory_hotplug();
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	unlock_memory_hotplug();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		struct kmemleak_object *obj =
			list_entry(n, struct kmemleak_object, object_list);
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);

	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  stop it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
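
/*
 * Example interaction from user space, per Documentation/kmemleak.txt:
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	trigger an immediate scan
 *	# cat /sys/kernel/debug/kmemleak		list suspected leaks
 *	# echo clear > /sys/kernel/debug/kmemleak	grey the current reports
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	rescan every 600 seconds
 */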

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no scan thread had been started earlier (otherwise, kmemleak may still have
 * some useful information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;
	bool cleanup = scan_thread == NULL;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (cleanup) {
		rcu_read_lock();
		list_for_each_entry_rcu(object, &object_list, object_list)
			delete_object_full(object->pointer);
		rcu_read_unlock();
	}
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	atomic_set(&kmemleak_early_log, 0);
	if (atomic_read(&kmemleak_error)) {
		local_irq_restore(flags);
		return;
	} else
		atomic_set(&kmemleak_enabled, 1);
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (atomic_read(&kmemleak_warning)) {
			print_log_trace(log);
			atomic_set(&kmemleak_warning, 0);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);