/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
27 * The following locks and mutexes are used by kmemleak:
29 * - kmemleak_lock (rwlock): protects the object_list modifications and
30 * accesses to the object_tree_root. The object_list is the main list
31 * holding the metadata (struct kmemleak_object) for the allocated memory
32 * blocks. The object_tree_root is a red black tree used to look-up
33 * metadata based on a pointer to the corresponding memory block. The
34 * kmemleak_object structures are added to the object_list and
35 * object_tree_root in the create_object() function called from the
36 * kmemleak_alloc() callback and removed in delete_object() called from the
37 * kmemleak_free() callback
38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39 * the metadata (e.g. count) are protected by this lock. Note that some
40 * members of this structure may be protected by other means (atomic or
41 * kmemleak_lock). This lock is also held when scanning the corresponding
42 * memory block to avoid the kernel freeing it via the kmemleak_free()
43 * callback. This is less heavyweight than holding a global lock like
44 * kmemleak_lock during scanning
45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46 * unreferenced objects at a time. The gray_list contains the objects which
47 * are already referenced or marked as false positives and need to be
48 * scanned. This list is only modified during a scanning episode when the
49 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
50 * Note that the kmemleak_object.use_count is incremented when an object is
51 * added to the gray_list and therefore cannot be freed. This mutex also
52 * prevents multiple users of the "kmemleak" debugfs file together with
53 * modifications to the memory scanning parameters including the scan_thread
56 * Locks and mutexes are acquired/nested in the following order:
58 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
60 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * object; the sketch below illustrates the pattern.
 */
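
/*
 * A minimal sketch of the rules above (illustrative only, not used by
 * kmemleak itself): a hypothetical reader of the object metadata would take
 * the locks in the documented order and hold rcu_read_lock() around
 * get_object()/put_object():
 *
 *	rcu_read_lock();
 *	object = find_and_get_object(ptr, 0);	// takes kmemleak_lock briefly
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		// ... read object->count, object->flags ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);	// last reference schedules RCU freeing
 *	}
 *	rcu_read_unlock();
 */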
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN | __GFP_NOFAIL)
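
/*
 * For example (illustrative only), gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 * evaluates to GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN |
 * __GFP_NOFAIL: modifiers outside GFP_KERNEL/GFP_ATOMIC (here __GFP_ZERO) are
 * dropped, and the metadata allocation is prevented from triggering
 * aggressive reclaim or allocation warnings on behalf of the traced
 * allocation.
 */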
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1
/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};
/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2
/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debugfs "kmemleak" file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;
/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN,
	KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	int min_count;			/* minimum reference count */
	const void *ptr;		/* allocated/freed memory block */
	union {
		size_t size;		/* memory block size */
		unsigned long excess_ref; /* surplus reference passing */
	};
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;
static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)
/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}
/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan, when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
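
/*
 * A few concrete encodings, as a sanity check of the two predicates above
 * (illustrative only):
 *
 *	min_count == 1,  count == 0   ->  white (candidate leak)
 *	min_count == 1,  count >= 1   ->  gray  (referenced)
 *	min_count == 0,  count == 0   ->  gray  (false positive, still scanned)
 *	min_count == KMEMLEAK_BLACK   ->  neither white nor gray (ignored)
 */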
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}
/*
 * Look up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}
/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}
/*
 * Save the stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}
/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}
/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}
/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}
/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}
/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}
/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}
/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}
/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
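
/*
 * Usage sketch (hypothetical; the slab, vmalloc and percpu allocators
 * already call the kmemleak hooks internally): a custom pool allocator
 * would pair the callbacks as follows, where pool_take()/pool_put() are
 * made-up helpers:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *p = pool_take(size);
 *		if (p)
 *			kmemleak_alloc(p, size, 1, GFP_KERNEL);
 *		return p;
 *	}
 *
 *	void my_pool_free(void *p)
 *	{
 *		kmemleak_free(p);
 *		pool_put(p);
 *	}
 */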
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	} else if (kmemleak_early_log) {
		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
		/* reusing early_log.size for storing area->addr */
		log_early(KMEMLEAK_SET_EXCESS_REF,
			  area, (unsigned long)area->addr, 0);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);
/**
 * kmemleak_not_leak - mark an allocated object as a false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
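
/*
 * Typical use from client code (illustrative): an object whose only
 * reference is kept somewhere kmemleak cannot see, e.g. in device
 * registers, can be marked to suppress the report while still being
 * scanned for references to other objects:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		kmemleak_not_leak(obj);
 */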
/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
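
/*
 * Illustrative sketch (hypothetical layout): if only the header of a large
 * buffer holds pointers, restricting the scan avoids the payload being
 * misread as references:
 *
 *	struct big_buf {
 *		struct list_head link;	// the only real pointers
 *		u8 payload[4096];	// opaque data
 *	} *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	if (b)
 *		kmemleak_scan_area(&b->link, sizeof(b->link), GFP_KERNEL);
 */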
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
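
/*
 * Illustrative use: a buffer known to hold no kernel pointers at all
 * (e.g. a DMA data buffer, hypothetical here) can be skipped entirely:
 *
 *	buf = kmalloc(4096, GFP_KERNEL);
 *	if (buf)
 *		kmemleak_no_scan(buf);
 */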
/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);
/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}
/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}
/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}
/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			spin_unlock(&object->lock);
		}
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}
/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}
/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
/*
 * Scan the data sections and all the referenced memory blocks allocated via
 * the kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);
	scan_large_block(__start_ro_after_init, __end_ro_after_init);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported, but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}
/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}
/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}
/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}
/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}
static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}
/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}
static void __kmemleak_do_cleanup(void);
/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
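
/*
 * For example, from a shell (the same commands are documented in
 * Documentation/dev-tools/kmemleak.rst):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo clear > /sys/kernel/debug/kmemleak
 */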
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}
/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no previous scan thread (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}
/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}
/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}
/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);