// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM

static bool kmem_cache_is_duplicate_name(const char *name)
{
	struct kmem_cache *s;

	list_for_each_entry(s, &slab_caches, list) {
		if (!strcmp(s->name, name))
			return true;
	}

	return false;
}

static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	/* Duplicate names will confuse slabtop, et al */
	WARN(kmem_cache_is_duplicate_name(name),
	     "kmem_cache of name '%s' already exists\n", name);

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */

	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
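/*
 * A worked example, assuming a 64-byte cache line: for a 20-byte object
 * created with SLAB_HWCACHE_ALIGN, ralign starts at 64 and is halved to 32
 * (20 <= 32) but not to 16 (20 > 16), so the object ends up aligned to a
 * 32-byte boundary rather than a full cache line.
 */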
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}
struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}
static struct kmem_cache *create_cache(const char *name,
				       unsigned int object_size,
				       struct kmem_cache_args *args,
				       slab_flags_t flags)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(args->useroffset + args->usersize > object_size))
		args->useroffset = args->usersize = 0;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
	if (args->use_freeptr_offset &&
	    (args->freeptr_offset >= object_size ||
	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
	     !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	err = do_kmem_cache_create(s, name, object_size, args, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}
/**
 * __kmem_cache_create_args - Create a kmem cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
 * @flags: See %SLAB_* flags for an explanation of individual @flags.
 *
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
 * parameters.
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags)
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, object_size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize of useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!args->usersize && args->useroffset) ||
	    WARN_ON(object_size < args->usersize ||
		    object_size - args->usersize < args->useroffset))
		args->usersize = args->useroffset = 0;

	s = __kmem_cache_alias(name, object_size, args->align, flags,
			       args->ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	args->align = calculate_alignment(flags, args->align, object_size);
	s = create_cache(cache_name, object_size, args, flags);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
			      __func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(__kmem_cache_create_args);
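/*
 * A minimal usage sketch (hypothetical struct and cache names): callers
 * normally go through the kmem_cache_create() wrapper rather than calling
 * __kmem_cache_create_args() directly, e.g.:
 *
 *	struct foo_entry {
 *		struct list_head list;
 *		int value;
 *	};
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo_entry", sizeof(struct foo_entry),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */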
static struct kmem_cache *kmem_buckets_cache __ro_after_init;

/**
 * kmem_buckets_create - Create a set of caches that handle dynamic sized
 *			 allocations via kmem_buckets_alloc()
 * @name: A prefix string which is used in /proc/slabinfo to identify this
 *	  cache. The individual caches will have their sizes as the suffix.
 * @flags: SLAB flags (see kmem_cache_create() for details).
 * @useroffset: Starting offset within an allocation that may be copied
 *		to/from userspace.
 * @usersize: How many bytes, starting at @useroffset, may be copied
 *	      to/from userspace.
 * @ctor: A constructor for the objects, run when new allocations are made.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure. When
 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
 * (i.e. callers only need to check for NULL on failure.)
 */
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{
	kmem_buckets *b;
	int idx;

	/*
	 * When the separate buckets API is not built in, just return
	 * a non-NULL value for the kmem_buckets pointer, which will be
	 * unused when performing allocations.
	 */
	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
		return ZERO_SIZE_PTR;

	if (WARN_ON(!kmem_buckets_cache))
		return NULL;

	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
	if (WARN_ON(!b))
		return NULL;

	flags |= SLAB_NO_MERGE;

	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
		char *short_size, *cache_name;
		unsigned int cache_useroffset, cache_usersize;
		unsigned int size;

		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
			continue;

		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;

		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
		if (WARN_ON(!short_size))
			goto fail;

		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
		if (WARN_ON(!cache_name))
			goto fail;

		if (useroffset >= size) {
			cache_useroffset = 0;
			cache_usersize = 0;
		} else {
			cache_useroffset = useroffset;
			cache_usersize = min(size - cache_useroffset, usersize);
		}
		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
					0, flags, cache_useroffset,
					cache_usersize, ctor);
		kfree(cache_name);
		if (WARN_ON(!(*b)[idx]))
			goto fail;
	}

	return b;

fail:
	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
		kmem_cache_destroy((*b)[idx]);
	kmem_cache_free(kmem_buckets_cache, b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);
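/*
 * A minimal usage sketch (hypothetical names): a subsystem that wants its
 * variable-sized allocations kept out of the shared kmalloc caches can do:
 *
 *	static kmem_buckets *foo_buckets;
 *
 *	foo_buckets = kmem_buckets_create("foo", 0, 0, 0, NULL);
 *	if (!foo_buckets)
 *		return -ENOMEM;
 *
 *	p = kmem_buckets_alloc(foo_buckets, len, GFP_KERNEL);
 *	...
 *	kfree(p);
 */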
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);
	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
	else
		slab_kmem_cache_release(s);
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}
void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	/* in-flight kfree_rcu()'s may include objects from our cache */
	kvfree_rcu_barrier();

	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
		/*
		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
		 * defer their freeing with call_rcu().
		 * Wait for such call_rcu() invocations here before actually
		 * destroying the cache.
		 *
		 * It doesn't matter that we haven't looked at the slab refcount
		 * yet - slabs with SLAB_TYPESAFE_BY_RCU can't be merged, so
		 * the refcount should be 1 here.
		 */
		rcu_barrier();
	}

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount) {
		mutex_unlock(&slab_mutex);
		return;
	}

	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	err = __kmem_cache_shutdown(s);
	if (!slab_in_kunit_test())
		WARN(err, "%s %s: Slab cache still has objects when called from %pS",
		     __func__, s->name, (void *)_RET_IP_);

	list_del(&s->list);

	mutex_unlock(&slab_mutex);

	if (slab_state >= FULL)
		sysfs_slab_unlink(s);
	debugfs_slab_release(s);

	if (err)
		return;

	if (s->flags & SLAB_TYPESAFE_BY_RCU)
		rcu_barrier();

	kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);
bool slab_is_available(void)
{
	return slab_state >= UP;
}
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}
/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);

	ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
	pr_cont(" pointer offset %lu", ptroffset);

	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	pr_cont(" allocated at %pS\n", kp.kp_ret);

	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
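/*
 * A minimal usage sketch: callers such as mem_dump_obj() print their own
 * preamble and then let this function append the slab details:
 *
 *	pr_cont("pointer %px is", p);
 *	if (!kmem_dump_obj(p))
 *		pr_cont(" not a recognized slab object.\n");
 */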
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;
	struct kmem_cache_args kmem_args = {};

	/*
	 * kmalloc caches guarantee alignment of at least the largest
	 * power-of-two divisor of the size. For power-of-two sizes,
	 * it is the size itself.
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
	kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	kmem_args.useroffset = useroffset;
	kmem_args.usersize = usersize;
#endif

	err = do_kmem_cache_create(s, name, size, &kmem_args, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
		      name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}
static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						       unsigned int size,
						       slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}
kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif
/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};
size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
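/*
 * A worked example, assuming the default kmalloc size classes: a 56-byte
 * request is served from the 64-byte cache, so kmalloc_size_roundup(56)
 * returns 64; a 200-byte request rounds up to 256.
 */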
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)			.name[KMALLOC_RANDOM_START + 1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)	KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START + 2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)	KMA_RAND_2(sz)	.name[KMALLOC_RANDOM_START + 3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)	KMA_RAND_3(sz)	.name[KMALLOC_RANDOM_START + 4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)	KMA_RAND_4(sz)	.name[KMALLOC_RANDOM_START + 5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)	KMA_RAND_5(sz)	.name[KMALLOC_RANDOM_START + 6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)	KMA_RAND_6(sz)	.name[KMALLOC_RANDOM_START + 7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)	KMA_RAND_7(sz)	.name[KMALLOC_RANDOM_START + 8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)	KMA_RAND_8(sz)	.name[KMALLOC_RANDOM_START + 9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz)	KMA_RAND_9(sz)	.name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz)	KMA_RAND_10(sz)	.name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz)	KMA_RAND_11(sz)	.name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz)	KMA_RAND_12(sz)	.name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz)	KMA_RAND_13(sz)	.name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz)	KMA_RAND_14(sz)	.name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif // CONFIG_RANDOM_KMALLOC_CACHES
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
	.size = __size,						\
}
/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};
/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		     !is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}
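/*
 * A worked example of the redirection above, assuming KMALLOC_MIN_SIZE is 64:
 * the 96-byte cache cannot exist in that configuration, so requests of
 * 72..96 bytes are pointed at index 7 (the 128-byte cache) instead.
 */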
static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}
static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		/* Caches that are NOT of the two-to-the-power-of size. */
		if (KMALLOC_MIN_SIZE <= 32)
			new_kmalloc_cache(1, type);
		if (KMALLOC_MIN_SIZE <= 64)
			new_kmalloc_cache(2, type);

		/* Caches that are of the two-to-the-power-of size. */
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
			new_kmalloc_cache(i, type);
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;

	if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
		kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
						       sizeof(kmem_buckets),
						       0, SLAB_NO_MERGE, NULL);
}
/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}
gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);

	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}
static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}
static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Here acquiring slab_mutex is risky since we don't prefer to get
	 * sleep in oom path. But, without mutex hold, it may introduce a
	 * risk of crash.
	 * Use mutex_trylock to protect the list traverse, dump nothing
	 * without acquiring the mutex.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */
static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	/* Check for double-free before calling ksize. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = ksize(p);
	}

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		/* Zero out spare memory. */
		if (want_init_on_alloc(flags)) {
			kasan_disable_current();
			memset((void *)p + new_size, 0, ks - new_size);
			kasan_enable_current();
		}

		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * This is the case, since krealloc() only knows about the bucket size of an
 * allocation (but not the exact size it was allocated with) and hence
 * implements the following semantics for shrinking and growing buffers with
 * __GFP_ZERO.
 *
 *         new                 bucket
 * 0       size                size
 * |--------|----------------|
 * |  keep  |      zero      |
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc_noprof);
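/*
 * A minimal usage sketch (hypothetical buffer names): keep the old pointer
 * until krealloc() succeeds, since the original allocation is left intact
 * on failure:
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;		(buf remains valid and owned)
 *	buf = new_buf;
 */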
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
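/*
 * A minimal usage sketch (hypothetical key buffer): pair allocations that
 * hold secrets with kfree_sensitive() instead of kfree() so the contents
 * are wiped before the memory is returned to the allocator:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);
 */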
size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than one
	 * printed later, when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
);