/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;
	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}
	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;
		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}
	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif
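/*
 * Illustrative only (not part of the original file): with CONFIG_DEBUG_VM
 * enabled, the check above would reject calls such as the following.
 * The cache names here are hypothetical.
 *
 *	kmem_cache_create(NULL, 32, 0, 0, NULL);	- missing name
 *	kmem_cache_create("tiny", 2, 0, 0, NULL);	- size < sizeof(void *)
 *	kmem_cache_create("dup", 32, 0, 0, NULL);	- "dup" already registered
 *	kmem_cache_create("has space", 32, 0, 0, NULL);	- WARN_ON only, not fatal
 */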
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as possible.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);
	if (kmem_cache_sanity_check(name, size) != 0)
		goto out_locked;
	/* Try to reuse (alias) a compatible existing cache first */
	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = align;
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}
		err = __kmem_cache_create(s, flags);
		if (!err) {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else
		err = -ENOMEM;

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
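/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * the client side of this API. The struct foo and foo_cache names are
 * hypothetical.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */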
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);
		if (!__kmem_cache_shutdown(s)) {
			mutex_unlock(&slab_mutex);
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			mutex_unlock(&slab_mutex);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	} else {
		mutex_unlock(&slab_mutex);
	}
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
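/*
 * Illustrative only (not part of the original file): teardown mirrors
 * creation. Every object must have been returned to the cache first,
 * otherwise the cache is re-added to slab_caches and an error is logged.
 * Continuing the hypothetical foo_cache sketch above:
 *
 *	kmem_cache_destroy(foo_cache);
 *	foo_cache = NULL;
 */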
int slab_is_available(void)
{
	return slab_state >= UP;
}
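/*
 * Illustrative only (not part of the original file): early-boot code
 * typically branches on slab_is_available() before calling kmalloc().
 * A minimal sketch, assuming a caller with a bootmem fallback:
 *
 *	if (slab_is_available())
 *		ptr = kmalloc(size, GFP_NOWAIT);
 *	else
 *		ptr = alloc_bootmem(size);
 */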