2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
12 #include <linux/module.h>
13 #include <linux/bit_spinlock.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
17 #include <linux/seq_file.h>
18 #include <linux/cpu.h>
19 #include <linux/cpuset.h>
20 #include <linux/mempolicy.h>
21 #include <linux/ctype.h>
22 #include <linux/kallsyms.h>
29 * The slab_lock protects operations on the object of a particular
30 * slab and its metadata in the page struct. If the slab lock
31 * has been taken then no allocations nor frees can be performed
32 * on the objects in the slab nor can the slab be added or removed
33 * from the partial or full lists since this would mean modifying
34 * the page_struct of the slab.
36 * The list_lock protects the partial and full list on each node and
37 * the partial slab counter. If taken then no new slabs may be added or
38 * removed from the lists nor can the number of partial slabs be modified.
39 * (Note that the total number of slabs is an atomic value that may be
40 * modified without taking the list lock).
42 * The list_lock is a centralized lock and thus we avoid taking it as
43 * much as possible. As long as SLUB does not have to handle partial
44 * slabs, operations can continue without any centralized lock. F.e.
45 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
48 * The lock order is sometimes inverted when we are trying to get a slab
49 * off a list. We take the list_lock and then look for a page on the list
50 * to use. While we do that objects in the slabs may be freed. We can
51 * only operate on the slab if we have also taken the slab_lock. So we use
52 * a slab_trylock() on the slab. If trylock was successful then no frees
53 * can occur anymore and we can use the slab for allocations etc. If the
54 * slab_trylock() does not succeed then frees are in progress in the slab and
55 * we must stay away from it for a while since we may cause a bouncing
56 * cacheline if we try to acquire the lock. So go onto the next slab.
57 * If all pages are busy then we may allocate a new slab instead of reusing
58 * a partial slab. A new slab has no one operating on it and thus there is
59 * no danger of cacheline contention.
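 *
 * A rough sketch of the scan described above (illustrative only; the real
 * code lives in get_partial_node() further down, error handling omitted):
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page)) {
 *			list_del(&page->lru);
 *			goto got_one;		-- frees are now locked out
 *		}
 *	spin_unlock(&n->list_lock);		-- all busy, allocate a new slab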
61 * Interrupts are disabled during allocation and deallocation in order to
62 * make the slab allocator safe to use in the context of an irq. In addition
63 * interrupts are disabled to ensure that the processor does not change
64 * while handling per_cpu slabs, due to kernel preemption.
66 * SLUB assigns one slab for allocation to each processor.
67 * Allocations only occur from these slabs called cpu slabs.
69 * Slabs with free elements are kept on a partial list and during regular
70 * operations no list for full slabs is used. If an object in a full slab is
71 * freed then the slab will show up again on the partial lists.
72 * We track full slabs for debugging purposes though because otherwise we
73 * cannot scan all objects.
75 * Slabs are freed when they become empty. Teardown and setup is
76 * minimal so we rely on the page allocators per cpu caches for
77 * fast frees and allocs.
79 * Overloading of page flags that are otherwise used for LRU management.
81 * PageActive The slab is frozen and exempt from list processing.
82 * This means that the slab is dedicated to a purpose
83 * such as satisfying allocations for a specific
84 * processor. Objects may be freed in the slab while
85 * it is frozen but slab_free will then skip the usual
86 * list operations. It is up to the processor holding
87 * the slab to integrate the slab into the slab lists
88 * when the slab is no longer needed.
90 * One use of this flag is to mark slabs that are
91 * used for allocations. Then such a slab becomes a cpu
92 * slab. The cpu slab may be equipped with an additional
93 * lockless_freelist that allows lockless access to
94 * free objects in addition to the regular freelist
95 * that requires the slab lock.
97 * PageError Slab requires special handling due to debug
98 * options set. This moves slab handling out of
99 * the fast path and disables lockless freelists.
102 #define FROZEN (1 << PG_active)
104 #ifdef CONFIG_SLUB_DEBUG
105 #define SLABDEBUG (1 << PG_error)
110 static inline int SlabFrozen(struct page *page)
112 	return page->flags & FROZEN;
115 static inline void SetSlabFrozen(struct page *page)
117 	page->flags |= FROZEN;
120 static inline void ClearSlabFrozen(struct page *page)
122 	page->flags &= ~FROZEN;
125 static inline int SlabDebug(struct page *page)
127 	return page->flags & SLABDEBUG;
130 static inline void SetSlabDebug(struct page *page)
132 	page->flags |= SLABDEBUG;
135 static inline void ClearSlabDebug(struct page *page)
137 	page->flags &= ~SLABDEBUG;
141 * Issues still to be resolved:
143 * - The per cpu array is updated for each new slab and is a remote
144 * cacheline for most nodes. This could become a bouncing cacheline given
145 * enough frequent updates. There are 16 pointers in a cacheline, so at
146 * max 16 cpus could compete for the cacheline which may be okay.
148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
150 * - Variable sizing of the per node arrays
153 /* Enable to test recovery from slab corruption on boot */
154 #undef SLUB_RESILIENCY_TEST
159 * Small page size. Make sure that we do not fragment memory
161 #define DEFAULT_MAX_ORDER 1
162 #define DEFAULT_MIN_OBJECTS 4
167 * Large page machines are customarily able to handle larger
 * page orders.
170 #define DEFAULT_MAX_ORDER 2
171 #define DEFAULT_MIN_OBJECTS 8
176 * Minimum number of partial slabs. These will be left on the partial
177 * lists even if they are empty. kmem_cache_shrink may reclaim them.
179 #define MIN_PARTIAL 2
182 * Maximum number of desirable partial slabs.
183 * The existence of more partial slabs makes kmem_cache_shrink
184 * sort the partial list by the number of objects in them.
186 #define MAX_PARTIAL 10
188 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
189 SLAB_POISON | SLAB_STORE_USER)
192 * Set of flags that will prevent slab merging
194 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
195 SLAB_TRACE | SLAB_DESTROY_BY_RCU)
197 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
200 #ifndef ARCH_KMALLOC_MINALIGN
201 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
204 #ifndef ARCH_SLAB_MINALIGN
205 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
208 /* Internal SLUB flags */
209 #define __OBJECT_POISON 0x80000000 /* Poison object */
211 /* Not all arches define cache_line_size */
212 #ifndef cache_line_size
213 #define cache_line_size() L1_CACHE_BYTES
216 static int kmem_size = sizeof(struct kmem_cache);
219 static struct notifier_block slab_notifier;
223 	DOWN,		/* No slab functionality available */
224 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
225 	UP,		/* Everything works but does not show up in sysfs */
229 /* A list of all slab caches on the system */
230 static DECLARE_RWSEM(slub_lock);
231 LIST_HEAD(slab_caches);
234 * Tracking user of a slab.
237 	void *addr;		/* Called from address */
238 	int cpu;		/* Was running on cpu */
239 	int pid;		/* Pid context */
240 	unsigned long when;	/* When did the operation occur */
243 enum track_item { TRACK_ALLOC, TRACK_FREE };
245 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
246 static int sysfs_slab_add(struct kmem_cache *);
247 static int sysfs_slab_alias(struct kmem_cache *, const char *);
248 static void sysfs_slab_remove(struct kmem_cache *);
250 static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251 static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
252 static void sysfs_slab_remove(struct kmem_cache *s) {}
255 /********************************************************************
256 * Core slab cache functions
257 *******************************************************************/
259 int slab_is_available(void)
261 	return slab_state >= UP;
264 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
267 	return s->node[node];
269 	return &s->local_node;
273 static inline int check_valid_pointer(struct kmem_cache *s,
274 		struct page *page, const void *object)
281 	base = page_address(page);
282 	if (object < base || object >= base + s->objects * s->size ||
283 		(object - base) % s->size) {
291 * Slow version of get and set free pointer.
293 * This version requires touching the cache lines of kmem_cache which
294 * we avoid doing in the fast alloc/free paths. There we obtain the offset
295 * from the page struct.
297 static inline void *get_freepointer(struct kmem_cache *s, void *object)
299 	return *(void **)(object + s->offset);
302 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
304 	*(void **)(object + s->offset) = fp;
307 /* Loop over all objects in a slab */
308 #define for_each_object(__p, __s, __addr) \
309 for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
313 #define for_each_free_object(__p, __s, __free) \
314 for (__p = (__free); __p; __p = get_freepointer((__s), __p))
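/*
 * Usage sketch for the iterator above (illustrative only, not a helper that
 * exists elsewhere in this file). The slab lock must be held so that the
 * free chain cannot change underneath us:
 *
 *	void *p;
 *	int nr_free = 0;
 *
 *	for_each_free_object(p, s, page->freelist)
 *		nr_free++;
 */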
316 /* Determine object index from a given position */
317 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
319 	return (p - addr) / s->size;
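/*
 * Example (illustrative numbers): with s->size == 64, an object located at
 * addr + 192 has slab index 3, i.e. it is the fourth object in the slab.
 */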
322 #ifdef CONFIG_SLUB_DEBUG
326 #ifdef CONFIG_SLUB_DEBUG_ON
327 static int slub_debug = DEBUG_DEFAULT_FLAGS;
329 static int slub_debug;
332 static char *slub_debug_slabs;
337 static void print_section(char *text
, u8
*addr
, unsigned int length
)
345 for (i
= 0; i
< length
; i
++) {
347 printk(KERN_ERR
"%10s 0x%p: ", text
, addr
+ i
);
350 printk(" %02x", addr
[i
]);
352 ascii
[offset
] = isgraph(addr
[i
]) ? addr
[i
] : '.';
354 printk(" %s\n",ascii
);
365 printk(" %s\n", ascii
);
369 static struct track
*get_track(struct kmem_cache
*s
, void *object
,
370 enum track_item alloc
)
375 p
= object
+ s
->offset
+ sizeof(void *);
377 p
= object
+ s
->inuse
;
382 static void set_track(struct kmem_cache
*s
, void *object
,
383 enum track_item alloc
, void *addr
)
388 p
= object
+ s
->offset
+ sizeof(void *);
390 p
= object
+ s
->inuse
;
395 p
->cpu
= smp_processor_id();
396 p
->pid
= current
? current
->pid
: -1;
399 memset(p
, 0, sizeof(struct track
));
402 static void init_tracking(struct kmem_cache
*s
, void *object
)
404 if (s
->flags
& SLAB_STORE_USER
) {
405 set_track(s
, object
, TRACK_FREE
, NULL
);
406 set_track(s
, object
, TRACK_ALLOC
, NULL
);
410 static void print_track(const char *s
, struct track
*t
)
415 printk(KERN_ERR
"%s: ", s
);
416 __print_symbol("%s", (unsigned long)t
->addr
);
417 printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies
- t
->when
, t
->cpu
, t
->pid
);
420 static void print_trailer(struct kmem_cache
*s
, u8
*p
)
422 unsigned int off
; /* Offset of last byte */
424 if (s
->flags
& SLAB_RED_ZONE
)
425 print_section("Redzone", p
+ s
->objsize
,
426 s
->inuse
- s
->objsize
);
428 printk(KERN_ERR
"FreePointer 0x%p -> 0x%p\n",
430 get_freepointer(s
, p
));
433 off
= s
->offset
+ sizeof(void *);
437 if (s
->flags
& SLAB_STORE_USER
) {
438 print_track("Last alloc", get_track(s
, p
, TRACK_ALLOC
));
439 print_track("Last free ", get_track(s
, p
, TRACK_FREE
));
440 off
+= 2 * sizeof(struct track
);
444 /* Beginning of the filler is the free pointer */
445 print_section("Filler", p
+ off
, s
->size
- off
);
448 static void object_err(struct kmem_cache
*s
, struct page
*page
,
449 u8
*object
, char *reason
)
451 u8
*addr
= page_address(page
);
453 printk(KERN_ERR
"*** SLUB %s: %s@0x%p slab 0x%p\n",
454 s
->name
, reason
, object
, page
);
455 printk(KERN_ERR
" offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
456 object
- addr
, page
->flags
, page
->inuse
, page
->freelist
);
457 if (object
> addr
+ 16)
458 print_section("Bytes b4", object
- 16, 16);
459 print_section("Object", object
, min(s
->objsize
, 128));
460 print_trailer(s
, object
);
464 static void slab_err(struct kmem_cache
*s
, struct page
*page
, char *reason
, ...)
469 va_start(args
, reason
);
470 vsnprintf(buf
, sizeof(buf
), reason
, args
);
472 printk(KERN_ERR
"*** SLUB %s: %s in slab @0x%p\n", s
->name
, buf
,
477 static void init_object(struct kmem_cache
*s
, void *object
, int active
)
481 if (s
->flags
& __OBJECT_POISON
) {
482 memset(p
, POISON_FREE
, s
->objsize
- 1);
483 p
[s
->objsize
-1] = POISON_END
;
486 if (s
->flags
& SLAB_RED_ZONE
)
487 memset(p
+ s
->objsize
,
488 active
? SLUB_RED_ACTIVE
: SLUB_RED_INACTIVE
,
489 s
->inuse
- s
->objsize
);
492 static int check_bytes(u8
*start
, unsigned int value
, unsigned int bytes
)
495 if (*start
!= (u8
)value
)
507 * Bytes of the object to be managed.
508 * If the freepointer may overlay the object then the free
509 * pointer is the first word of the object.
511 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
514 * object + s->objsize
515 * Padding to reach word boundary. This is also used for Redzoning.
516 * Padding is extended by another word if Redzoning is enabled and
519 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
520 * 0xcc (RED_ACTIVE) for objects in use.
523 * Meta data starts here.
525 * A. Free pointer (if we cannot overwrite object on free)
526 * B. Tracking data for SLAB_STORE_USER
527 * C. Padding to reach required alignment boundary or at minimum
528 * one word if debugging is on to be able to detect writes
529 * before the word boundary.
531 * Padding is done using 0x5a (POISON_INUSE)
534 * Nothing is used beyond s->size.
536 * If slabcaches are merged then the objsize and inuse boundaries are mostly
537 * ignored. And therefore no slab options that rely on these boundaries
538 * may be used with merged slabcaches.
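 *
 * Worked example (illustrative, assumes a 64 bit machine, an 8 byte
 * alignment and a 24 byte struct track): a cache with objsize 56 and
 * SLAB_RED_ZONE, SLAB_POISON and SLAB_STORE_USER set lays out each object as
 *
 *	  0.. 55  object proper (0x6b poison when free, last byte 0xa5)
 *	 56.. 63  red zone word (0xbb inactive / 0xcc active), inuse = 64
 *	 64.. 71  free pointer, relocated here because the object is poisoned
 *	 72..119  two struct track records (last alloc, last free)
 *	120..127  extra padding word added for red zoning, filled with 0x5a
 *
 * so s->size ends up at 128 and s->offset at 64.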
541 static void restore_bytes(struct kmem_cache
*s
, char *message
, u8 data
,
542 void *from
, void *to
)
544 printk(KERN_ERR
"@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
545 s
->name
, message
, data
, from
, to
- 1);
546 memset(from
, data
, to
- from
);
549 static int check_pad_bytes(struct kmem_cache
*s
, struct page
*page
, u8
*p
)
551 unsigned long off
= s
->inuse
; /* The end of info */
554 /* Freepointer is placed after the object. */
555 off
+= sizeof(void *);
557 if (s
->flags
& SLAB_STORE_USER
)
558 /* We also have user information there */
559 off
+= 2 * sizeof(struct track
);
564 if (check_bytes(p
+ off
, POISON_INUSE
, s
->size
- off
))
567 object_err(s
, page
, p
, "Object padding check fails");
572 restore_bytes(s
, "object padding", POISON_INUSE
, p
+ off
, p
+ s
->size
);
576 static int slab_pad_check(struct kmem_cache
*s
, struct page
*page
)
579 int length
, remainder
;
581 if (!(s
->flags
& SLAB_POISON
))
584 p
= page_address(page
);
585 length
= s
->objects
* s
->size
;
586 remainder
= (PAGE_SIZE
<< s
->order
) - length
;
590 if (!check_bytes(p
+ length
, POISON_INUSE
, remainder
)) {
591 slab_err(s
, page
, "Padding check failed");
592 restore_bytes(s
, "slab padding", POISON_INUSE
, p
+ length
,
593 p
+ length
+ remainder
);
599 static int check_object(struct kmem_cache
*s
, struct page
*page
,
600 void *object
, int active
)
603 u8
*endobject
= object
+ s
->objsize
;
605 if (s
->flags
& SLAB_RED_ZONE
) {
607 active
? SLUB_RED_ACTIVE
: SLUB_RED_INACTIVE
;
609 if (!check_bytes(endobject
, red
, s
->inuse
- s
->objsize
)) {
610 object_err(s
, page
, object
,
611 active
? "Redzone Active" : "Redzone Inactive");
612 restore_bytes(s
, "redzone", red
,
613 endobject
, object
+ s
->inuse
);
617 if ((s
->flags
& SLAB_POISON
) && s
->objsize
< s
->inuse
&&
618 !check_bytes(endobject
, POISON_INUSE
,
619 s
->inuse
- s
->objsize
)) {
620 object_err(s
, page
, p
, "Alignment padding check fails");
622 * Fix it so that there will not be another report.
624 * Hmmm... We may be corrupting an object that now expects
625 * to be longer than allowed.
627 restore_bytes(s
, "alignment padding", POISON_INUSE
,
628 endobject
, object
+ s
->inuse
);
632 if (s
->flags
& SLAB_POISON
) {
633 if (!active
&& (s
->flags
& __OBJECT_POISON
) &&
634 (!check_bytes(p
, POISON_FREE
, s
->objsize
- 1) ||
635 p
[s
->objsize
- 1] != POISON_END
)) {
637 object_err(s
, page
, p
, "Poison check failed");
638 restore_bytes(s
, "Poison", POISON_FREE
,
639 p
, p
+ s
->objsize
-1);
640 restore_bytes(s
, "Poison", POISON_END
,
641 p
+ s
->objsize
- 1, p
+ s
->objsize
);
645 * check_pad_bytes cleans up on its own.
647 check_pad_bytes(s
, page
, p
);
650 if (!s
->offset
&& active
)
652 * Object and freepointer overlap. Cannot check
653 * freepointer while object is allocated.
657 /* Check free pointer validity */
658 if (!check_valid_pointer(s
, page
, get_freepointer(s
, p
))) {
659 object_err(s
, page
, p
, "Freepointer corrupt");
661 * No choice but to zap it and thus lose the remainder
662 * of the free objects in this slab. May cause
663 * another error because the object count is now wrong.
665 set_freepointer(s
, p
, NULL
);
671 static int check_slab(struct kmem_cache
*s
, struct page
*page
)
673 VM_BUG_ON(!irqs_disabled());
675 if (!PageSlab(page
)) {
676 slab_err(s
, page
, "Not a valid slab page flags=%lx "
677 "mapping=0x%p count=%d", page
->flags
, page
->mapping
,
681 if (page
->offset
* sizeof(void *) != s
->offset
) {
682 slab_err(s
, page
, "Corrupted offset %lu flags=0x%lx "
683 "mapping=0x%p count=%d",
684 (unsigned long)(page
->offset
* sizeof(void *)),
690 if (page
->inuse
> s
->objects
) {
691 slab_err(s
, page
, "inuse %u > max %u @0x%p flags=%lx "
692 "mapping=0x%p count=%d",
693 s
->name
, page
->inuse
, s
->objects
, page
->flags
,
694 page
->mapping
, page_count(page
));
697 /* Slab_pad_check fixes things up after itself */
698 slab_pad_check(s
, page
);
703 * Determine if a certain object on a page is on the freelist. Must hold the
704 * slab lock to guarantee that the chains are in a consistent state.
706 static int on_freelist(struct kmem_cache
*s
, struct page
*page
, void *search
)
709 void *fp
= page
->freelist
;
712 while (fp
&& nr
<= s
->objects
) {
715 if (!check_valid_pointer(s
, page
, fp
)) {
717 object_err(s
, page
, object
,
718 "Freechain corrupt");
719 set_freepointer(s
, object
, NULL
);
722 slab_err(s
, page
, "Freepointer 0x%p corrupt",
724 page
->freelist
= NULL
;
725 page
->inuse
= s
->objects
;
726 printk(KERN_ERR
"@@@ SLUB %s: Freelist "
727 "cleared. Slab 0x%p\n",
734 fp
= get_freepointer(s
, object
);
738 if (page
->inuse
!= s
->objects
- nr
) {
739 slab_err(s
, page
, "Wrong object count. Counter is %d but "
740 "counted were %d", s
, page
, page
->inuse
,
742 page
->inuse
= s
->objects
- nr
;
743 printk(KERN_ERR
"@@@ SLUB %s: Object count adjusted. "
744 "Slab @0x%p\n", s
->name
, page
);
746 return search
== NULL
;
749 static void trace(struct kmem_cache
*s
, struct page
*page
, void *object
, int alloc
)
751 if (s
->flags
& SLAB_TRACE
) {
752 printk(KERN_INFO
"TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
754 alloc
? "alloc" : "free",
759 print_section("Object", (void *)object
, s
->objsize
);
766 * Tracking of fully allocated slabs for debugging purposes.
768 static void add_full(struct kmem_cache_node
*n
, struct page
*page
)
770 spin_lock(&n
->list_lock
);
771 list_add(&page
->lru
, &n
->full
);
772 spin_unlock(&n
->list_lock
);
775 static void remove_full(struct kmem_cache
*s
, struct page
*page
)
777 struct kmem_cache_node
*n
;
779 if (!(s
->flags
& SLAB_STORE_USER
))
782 n
= get_node(s
, page_to_nid(page
));
784 spin_lock(&n
->list_lock
);
785 list_del(&page
->lru
);
786 spin_unlock(&n
->list_lock
);
789 static void setup_object_debug(struct kmem_cache
*s
, struct page
*page
,
792 if (!(s
->flags
& (SLAB_STORE_USER
|SLAB_RED_ZONE
|__OBJECT_POISON
)))
795 init_object(s
, object
, 0);
796 init_tracking(s
, object
);
799 static int alloc_debug_processing(struct kmem_cache
*s
, struct page
*page
,
800 void *object
, void *addr
)
802 if (!check_slab(s
, page
))
805 if (object
&& !on_freelist(s
, page
, object
)) {
806 slab_err(s
, page
, "Object 0x%p already allocated", object
);
810 if (!check_valid_pointer(s
, page
, object
)) {
811 object_err(s
, page
, object
, "Freelist Pointer check fails");
815 if (object
&& !check_object(s
, page
, object
, 0))
818 /* Success perform special debug activities for allocs */
819 if (s
->flags
& SLAB_STORE_USER
)
820 set_track(s
, object
, TRACK_ALLOC
, addr
);
821 trace(s
, page
, object
, 1);
822 init_object(s
, object
, 1);
826 if (PageSlab(page
)) {
828 * If this is a slab page then let's do the best we can
829 * to avoid issues in the future. Marking all objects
830 * as used avoids touching the remaining objects.
832 printk(KERN_ERR
"@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
834 page
->inuse
= s
->objects
;
835 page
->freelist
= NULL
;
836 /* Fix up fields that may be corrupted */
837 page
->offset
= s
->offset
/ sizeof(void *);
842 static int free_debug_processing(struct kmem_cache
*s
, struct page
*page
,
843 void *object
, void *addr
)
845 if (!check_slab(s
, page
))
848 if (!check_valid_pointer(s
, page
, object
)) {
849 slab_err(s
, page
, "Invalid object pointer 0x%p", object
);
853 if (on_freelist(s
, page
, object
)) {
854 slab_err(s
, page
, "Object 0x%p already free", object
);
858 if (!check_object(s
, page
, object
, 1))
861 if (unlikely(s
!= page
->slab
)) {
863 slab_err(s
, page
, "Attempt to free object(0x%p) "
864 "outside of slab", object
);
868 "SLUB <none>: no slab for object 0x%p.\n",
873 slab_err(s
, page
, "object at 0x%p belongs "
874 "to slab %s", object
, page
->slab
->name
);
878 /* Special debug activities for freeing objects */
879 if (!SlabFrozen(page
) && !page
->freelist
)
880 remove_full(s
, page
);
881 if (s
->flags
& SLAB_STORE_USER
)
882 set_track(s
, object
, TRACK_FREE
, addr
);
883 trace(s
, page
, object
, 0);
884 init_object(s
, object
, 0);
888 printk(KERN_ERR
"@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
889 s
->name
, page
, object
);
893 static int __init
setup_slub_debug(char *str
)
895 slub_debug
= DEBUG_DEFAULT_FLAGS
;
896 if (*str
++ != '=' || !*str
)
898 * No options specified. Switch on full debugging.
904 * No options but restriction on slabs. This means full
905 * debugging for slabs matching a pattern.
912 * Switch off all debugging measures.
917 * Determine which debug features should be switched on
919 for ( ;*str
&& *str
!= ','; str
++) {
920 switch (tolower(*str
)) {
922 slub_debug
|= SLAB_DEBUG_FREE
;
925 slub_debug
|= SLAB_RED_ZONE
;
928 slub_debug
|= SLAB_POISON
;
931 slub_debug
|= SLAB_STORE_USER
;
934 slub_debug
|= SLAB_TRACE
;
937 printk(KERN_ERR
"slub_debug option '%c' "
938 "unknown. skipped\n",*str
);
944 slub_debug_slabs
= str
+ 1;
949 __setup("slub_debug", setup_slub_debug
);
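/*
 * Boot command line sketch (illustrative): "slub_debug=FZ,dentry" enables
 * free list consistency checks (f, SLAB_DEBUG_FREE) and red zoning
 * (z, SLAB_RED_ZONE) only for caches whose name starts with "dentry", while
 * a bare "slub_debug" switches on the full DEBUG_DEFAULT_FLAGS set for
 * every slab cache.
 */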
951 static void kmem_cache_open_debug_check(struct kmem_cache
*s
)
954 * The page->offset field is only 16 bit wide. This is an offset
955 * in units of words from the beginning of an object. If the slab
956 * size is bigger, then we cannot move the free pointer behind the
 * object anymore.
959 * On 32 bit platforms the limit is 256k. On 64bit platforms
 * the limit is 512k.
962 * Debugging or ctor may create a need to move the free
963 * pointer. Fail if this happens.
965 if (s
->objsize
>= 65535 * sizeof(void *)) {
966 BUG_ON(s
->flags
& (SLAB_RED_ZONE
| SLAB_POISON
|
967 SLAB_STORE_USER
| SLAB_DESTROY_BY_RCU
));
972 * Enable debugging if selected on the kernel commandline.
974 if (slub_debug
&& (!slub_debug_slabs
||
975 strncmp(slub_debug_slabs
, s
->name
,
976 strlen(slub_debug_slabs
)) == 0))
977 s
->flags
|= slub_debug
;
980 static inline void setup_object_debug(struct kmem_cache
*s
,
981 struct page
*page
, void *object
) {}
983 static inline int alloc_debug_processing(struct kmem_cache
*s
,
984 struct page
*page
, void *object
, void *addr
) { return 0; }
986 static inline int free_debug_processing(struct kmem_cache
*s
,
987 struct page
*page
, void *object
, void *addr
) { return 0; }
989 static inline int slab_pad_check(struct kmem_cache
*s
, struct page
*page
)
991 static inline int check_object(struct kmem_cache
*s
, struct page
*page
,
992 void *object
, int active
) { return 1; }
993 static inline void add_full(struct kmem_cache_node
*n
, struct page
*page
) {}
994 static inline void kmem_cache_open_debug_check(struct kmem_cache
*s
) {}
998 * Slab allocation and freeing
1000 static struct page
*allocate_slab(struct kmem_cache
*s
, gfp_t flags
, int node
)
1003 int pages
= 1 << s
->order
;
1006 flags
|= __GFP_COMP
;
1008 if (s
->flags
& SLAB_CACHE_DMA
)
1012 page
= alloc_pages(flags
, s
->order
);
1014 page
= alloc_pages_node(node
, flags
, s
->order
);
1019 mod_zone_page_state(page_zone(page
),
1020 (s
->flags
& SLAB_RECLAIM_ACCOUNT
) ?
1021 NR_SLAB_RECLAIMABLE
: NR_SLAB_UNRECLAIMABLE
,
1027 static void setup_object(struct kmem_cache
*s
, struct page
*page
,
1030 setup_object_debug(s
, page
, object
);
1031 if (unlikely(s
->ctor
))
1032 s
->ctor(object
, s
, 0);
1035 static struct page
*new_slab(struct kmem_cache
*s
, gfp_t flags
, int node
)
1038 struct kmem_cache_node
*n
;
1044 BUG_ON(flags
& ~(GFP_DMA
| GFP_LEVEL_MASK
));
1046 if (flags
& __GFP_WAIT
)
1049 page
= allocate_slab(s
, flags
& GFP_LEVEL_MASK
, node
);
1053 n
= get_node(s
, page_to_nid(page
));
1055 atomic_long_inc(&n
->nr_slabs
);
1056 page
->offset
= s
->offset
/ sizeof(void *);
1058 page
->flags
|= 1 << PG_slab
;
1059 if (s
->flags
& (SLAB_DEBUG_FREE
| SLAB_RED_ZONE
| SLAB_POISON
|
1060 SLAB_STORE_USER
| SLAB_TRACE
))
1063 start
= page_address(page
);
1064 end
= start
+ s
->objects
* s
->size
;
1066 if (unlikely(s
->flags
& SLAB_POISON
))
1067 memset(start
, POISON_INUSE
, PAGE_SIZE
<< s
->order
);
1070 for_each_object(p
, s
, start
) {
1071 setup_object(s
, page
, last
);
1072 set_freepointer(s
, last
, p
);
1075 setup_object(s
, page
, last
);
1076 set_freepointer(s
, last
, NULL
);
1078 page
->freelist
= start
;
1079 page
->lockless_freelist
= NULL
;
1082 if (flags
& __GFP_WAIT
)
1083 local_irq_disable();
1087 static void __free_slab(struct kmem_cache
*s
, struct page
*page
)
1089 int pages
= 1 << s
->order
;
1091 if (unlikely(SlabDebug(page
))) {
1094 slab_pad_check(s
, page
);
1095 for_each_object(p
, s
, page_address(page
))
1096 check_object(s
, page
, p
, 0);
1099 mod_zone_page_state(page_zone(page
),
1100 (s
->flags
& SLAB_RECLAIM_ACCOUNT
) ?
1101 NR_SLAB_RECLAIMABLE
: NR_SLAB_UNRECLAIMABLE
,
1104 page
->mapping
= NULL
;
1105 __free_pages(page
, s
->order
);
1108 static void rcu_free_slab(struct rcu_head
*h
)
1112 page
= container_of((struct list_head
*)h
, struct page
, lru
);
1113 __free_slab(page
->slab
, page
);
1116 static void free_slab(struct kmem_cache
*s
, struct page
*page
)
1118 if (unlikely(s
->flags
& SLAB_DESTROY_BY_RCU
)) {
1120 * RCU free overloads the RCU head over the LRU
1122 struct rcu_head
*head
= (void *)&page
->lru
;
1124 call_rcu(head
, rcu_free_slab
);
1126 __free_slab(s
, page
);
1129 static void discard_slab(struct kmem_cache
*s
, struct page
*page
)
1131 struct kmem_cache_node
*n
= get_node(s
, page_to_nid(page
));
1133 atomic_long_dec(&n
->nr_slabs
);
1134 reset_page_mapcount(page
);
1135 ClearSlabDebug(page
);
1136 __ClearPageSlab(page
);
1141 * Per slab locking using the pagelock
1143 static __always_inline void slab_lock(struct page *page)
1145 	bit_spin_lock(PG_locked, &page->flags);
1148 static __always_inline void slab_unlock(struct page *page)
1150 	bit_spin_unlock(PG_locked, &page->flags);
1153 static __always_inline int slab_trylock(struct page *page)
1157 	rc = bit_spin_trylock(PG_locked, &page->flags);
1162 * Management of partially allocated slabs
1164 static void add_partial_tail(struct kmem_cache_node
*n
, struct page
*page
)
1166 spin_lock(&n
->list_lock
);
1168 list_add_tail(&page
->lru
, &n
->partial
);
1169 spin_unlock(&n
->list_lock
);
1172 static void add_partial(struct kmem_cache_node
*n
, struct page
*page
)
1174 spin_lock(&n
->list_lock
);
1176 list_add(&page
->lru
, &n
->partial
);
1177 spin_unlock(&n
->list_lock
);
1180 static void remove_partial(struct kmem_cache
*s
,
1183 struct kmem_cache_node
*n
= get_node(s
, page_to_nid(page
));
1185 spin_lock(&n
->list_lock
);
1186 list_del(&page
->lru
);
1188 spin_unlock(&n
->list_lock
);
1192 * Lock slab and remove from the partial list.
1194 * Must hold list_lock.
1196 static inline int lock_and_freeze_slab(struct kmem_cache_node
*n
, struct page
*page
)
1198 if (slab_trylock(page
)) {
1199 list_del(&page
->lru
);
1201 SetSlabFrozen(page
);
1208 * Try to allocate a partial slab from a specific node.
1210 static struct page
*get_partial_node(struct kmem_cache_node
*n
)
1215 * Racy check. If we mistakenly see no partial slabs then we
1216 * just allocate an empty slab. If we mistakenly try to get a
1217 * partial slab and there is none available then get_partials()
 * will return NULL.
1220 if (!n
|| !n
->nr_partial
)
1223 spin_lock(&n
->list_lock
);
1224 list_for_each_entry(page
, &n
->partial
, lru
)
1225 if (lock_and_freeze_slab(n
, page
))
1229 spin_unlock(&n
->list_lock
);
1234 * Get a page from somewhere. Search in increasing NUMA distances.
1236 static struct page
*get_any_partial(struct kmem_cache
*s
, gfp_t flags
)
1239 struct zonelist
*zonelist
;
1244 * The defrag ratio allows a configuration of the tradeoffs between
1245 * inter node defragmentation and node local allocations. A lower
1246 * defrag_ratio increases the tendency to do local allocations
1247 * instead of attempting to obtain partial slabs from other nodes.
1249 * If the defrag_ratio is set to 0 then kmalloc() always
1250 * returns node local objects. If the ratio is higher then kmalloc()
1251 * may return off node objects because partial slabs are obtained
1252 * from other nodes and filled up.
1254 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
1255 * defrag_ratio = 1000) then every (well almost) allocation will
1256 * first attempt to defrag slab caches on other nodes. This means
1257 * scanning over all nodes to look for partial slabs which may be
1258 * expensive if we do it every time we are trying to find a slab
1259 * with available objects.
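 *
 * Worked numbers (illustrative): kmem_cache_open() sets s->defrag_ratio to
 * 100 by default, so the test below allows an off node search only when
 * get_cycles() % 1024 <= 100, i.e. for roughly one in ten slow path calls.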
1261 if (!s
->defrag_ratio
|| get_cycles() % 1024 > s
->defrag_ratio
)
1264 zonelist
= &NODE_DATA(slab_node(current
->mempolicy
))
1265 ->node_zonelists
[gfp_zone(flags
)];
1266 for (z
= zonelist
->zones
; *z
; z
++) {
1267 struct kmem_cache_node
*n
;
1269 n
= get_node(s
, zone_to_nid(*z
));
1271 if (n
&& cpuset_zone_allowed_hardwall(*z
, flags
) &&
1272 n
->nr_partial
> MIN_PARTIAL
) {
1273 page
= get_partial_node(n
);
1283 * Get a partial page, lock it and return it.
1285 static struct page
*get_partial(struct kmem_cache
*s
, gfp_t flags
, int node
)
1288 int searchnode
= (node
== -1) ? numa_node_id() : node
;
1290 page
= get_partial_node(get_node(s
, searchnode
));
1291 if (page
|| (flags
& __GFP_THISNODE
))
1294 return get_any_partial(s
, flags
);
1298 * Move a page back to the lists.
1300 * Must be called with the slab lock held.
1302 * On exit the slab lock will have been dropped.
1304 static void unfreeze_slab(struct kmem_cache
*s
, struct page
*page
)
1306 struct kmem_cache_node
*n
= get_node(s
, page_to_nid(page
));
1308 ClearSlabFrozen(page
);
1312 add_partial(n
, page
);
1313 else if (SlabDebug(page
) && (s
->flags
& SLAB_STORE_USER
))
1318 if (n
->nr_partial
< MIN_PARTIAL
) {
1320 * Adding an empty slab to the partial slabs in order
1321 * to avoid page allocator overhead. This slab needs
1322 * to come after the other slabs with objects in
1323 * order to fill them up. That way the size of the
1324 * partial list stays small. kmem_cache_shrink can
1325 * reclaim empty slabs from the partial list.
1327 add_partial_tail(n
, page
);
1331 discard_slab(s
, page
);
1337 * Remove the cpu slab
1339 static void deactivate_slab(struct kmem_cache
*s
, struct page
*page
, int cpu
)
1342 * Merge cpu freelist into freelist. Typically we get here
1343 * because both freelists are empty. So this is unlikely
 * to happen.
1346 while (unlikely(page
->lockless_freelist
)) {
1349 /* Retrieve object from cpu_freelist */
1350 object
= page
->lockless_freelist
;
1351 page
->lockless_freelist
= page
->lockless_freelist
[page
->offset
];
1353 /* And put onto the regular freelist */
1354 object
[page
->offset
] = page
->freelist
;
1355 page
->freelist
= object
;
1358 s
->cpu_slab
[cpu
] = NULL
;
1359 unfreeze_slab(s
, page
);
1362 static void flush_slab(struct kmem_cache
*s
, struct page
*page
, int cpu
)
1365 deactivate_slab(s
, page
, cpu
);
1370 * Called from IPI handler with interrupts disabled.
1372 static void __flush_cpu_slab(struct kmem_cache
*s
, int cpu
)
1374 struct page
*page
= s
->cpu_slab
[cpu
];
1377 flush_slab(s
, page
, cpu
);
1380 static void flush_cpu_slab(void *d
)
1382 struct kmem_cache
*s
= d
;
1383 int cpu
= smp_processor_id();
1385 __flush_cpu_slab(s
, cpu
);
1388 static void flush_all(struct kmem_cache
*s
)
1391 on_each_cpu(flush_cpu_slab
, s
, 1, 1);
1393 unsigned long flags
;
1395 local_irq_save(flags
);
1397 local_irq_restore(flags
);
1402 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
1405 * Interrupts are disabled.
1407 * Processing is still very fast if new objects have been freed to the
1408 * regular freelist. In that case we simply take over the regular freelist
1409 * as the lockless freelist and zap the regular freelist.
1411 * If that is not working then we fall back to the partial lists. We take the
1412 * first element of the freelist as the object to allocate now and move the
1413 * rest of the freelist to the lockless freelist.
1415 * And if we were unable to get a new slab from the partial slab lists then
1416 * we need to allocate a new slab. This is slowest path since we may sleep.
1418 static void *__slab_alloc(struct kmem_cache
*s
,
1419 gfp_t gfpflags
, int node
, void *addr
, struct page
*page
)
1422 int cpu
= smp_processor_id();
1428 if (unlikely(node
!= -1 && page_to_nid(page
) != node
))
1431 object
= page
->freelist
;
1432 if (unlikely(!object
))
1434 if (unlikely(SlabDebug(page
)))
1437 object
= page
->freelist
;
1438 page
->lockless_freelist
= object
[page
->offset
];
1439 page
->inuse
= s
->objects
;
1440 page
->freelist
= NULL
;
1445 deactivate_slab(s
, page
, cpu
);
1448 page
= get_partial(s
, gfpflags
, node
);
1450 s
->cpu_slab
[cpu
] = page
;
1454 page
= new_slab(s
, gfpflags
, node
);
1456 cpu
= smp_processor_id();
1457 if (s
->cpu_slab
[cpu
]) {
1459 * Someone else populated the cpu_slab while we
1460 * enabled interrupts, or we have gotten scheduled
1461 * on another cpu. The page may not be on the
1462 * requested node even if __GFP_THISNODE was
1463 * specified. So we need to recheck.
1466 page_to_nid(s
->cpu_slab
[cpu
]) == node
) {
1468 * Current cpuslab is acceptable and we
1469 * want the current one since it is cache hot
1471 discard_slab(s
, page
);
1472 page
= s
->cpu_slab
[cpu
];
1476 /* New slab does not fit our expectations */
1477 flush_slab(s
, s
->cpu_slab
[cpu
], cpu
);
1480 SetSlabFrozen(page
);
1481 s
->cpu_slab
[cpu
] = page
;
1486 object
= page
->freelist
;
1487 if (!alloc_debug_processing(s
, page
, object
, addr
))
1491 page
->freelist
= object
[page
->offset
];
1497 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1498 * have the fastpath folded into their functions. So no function call
1499 * overhead for requests that can be satisfied on the fastpath.
1501 * The fastpath works by first checking if the lockless freelist can be used.
1502 * If not then __slab_alloc is called for slow processing.
1504 * Otherwise we can simply pick the next object from the lockless free list.
1506 static void __always_inline
*slab_alloc(struct kmem_cache
*s
,
1507 gfp_t gfpflags
, int node
, void *addr
)
1511 unsigned long flags
;
1513 local_irq_save(flags
);
1514 page
= s
->cpu_slab
[smp_processor_id()];
1515 if (unlikely(!page
|| !page
->lockless_freelist
||
1516 (node
!= -1 && page_to_nid(page
) != node
)))
1518 object
= __slab_alloc(s
, gfpflags
, node
, addr
, page
);
1521 object
= page
->lockless_freelist
;
1522 page
->lockless_freelist
= object
[page
->offset
];
1524 local_irq_restore(flags
);
1528 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1530 	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1532 EXPORT_SYMBOL(kmem_cache_alloc);
1535 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1537 	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1539 EXPORT_SYMBOL(kmem_cache_alloc_node);
1543 * Slow path handling. This may still be called frequently since objects
1544 * have a longer lifetime than the cpu slabs in most processing loads.
1546 * So we still attempt to reduce cache line usage. Just take the slab
1547 * lock and free the item. If there is no additional partial page
1548 * handling required then we can return immediately.
1550 static void __slab_free(struct kmem_cache
*s
, struct page
*page
,
1551 void *x
, void *addr
)
1554 void **object
= (void *)x
;
1558 if (unlikely(SlabDebug(page
)))
1561 prior
= object
[page
->offset
] = page
->freelist
;
1562 page
->freelist
= object
;
1565 if (unlikely(SlabFrozen(page
)))
1568 if (unlikely(!page
->inuse
))
1572 * Objects left in the slab. If it
1573 * was not on the partial list before
1576 if (unlikely(!prior
))
1577 add_partial(get_node(s
, page_to_nid(page
)), page
);
1586 * Slab still on the partial list.
1588 remove_partial(s
, page
);
1591 discard_slab(s
, page
);
1595 if (!free_debug_processing(s
, page
, x
, addr
))
1601 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1602 * can perform fastpath freeing without additional function calls.
1604 * The fastpath is only possible if we are freeing to the current cpu slab
1605 * of this processor. This is typically the case if we have just allocated
 * the item before.
1608 * If fastpath is not possible then fall back to __slab_free where we deal
1609 * with all sorts of special processing.
1611 static void __always_inline
slab_free(struct kmem_cache
*s
,
1612 struct page
*page
, void *x
, void *addr
)
1614 void **object
= (void *)x
;
1615 unsigned long flags
;
1617 local_irq_save(flags
);
1618 if (likely(page
== s
->cpu_slab
[smp_processor_id()] &&
1619 !SlabDebug(page
))) {
1620 object
[page
->offset
] = page
->lockless_freelist
;
1621 page
->lockless_freelist
= object
;
1623 __slab_free(s
, page
, x
, addr
);
1625 local_irq_restore(flags
);
1628 void kmem_cache_free(struct kmem_cache *s, void *x)
1632 	page = virt_to_head_page(x);
1634 	slab_free(s, page, x, __builtin_return_address(0));
1636 EXPORT_SYMBOL(kmem_cache_free);
1638 /* Figure out on which slab page the object resides */
1639 static struct page *get_object_page(const void *x)
1641 	struct page *page = virt_to_head_page(x);
1643 	if (!PageSlab(page))
1650 * Object placement in a slab is made very easy because we always start at
1651 * offset 0. If we tune the size of the object to the alignment then we can
1652 * get the required alignment by putting one properly sized object after
 * another.
1655 * Notice that the allocation order determines the sizes of the per cpu
1656 * caches. Each processor has always one slab available for allocations.
1657 * Increasing the allocation order reduces the number of times that slabs
1658 * must be moved on and off the partial lists and is therefore a factor in
 * the locking overhead.
1663 * Minimum / Maximum order of slab pages. This influences locking overhead
1664 * and slab fragmentation. A higher order reduces the number of partial slabs
1665 * and increases the number of allocations possible without having to
1666 * take the list_lock.
1668 static int slub_min_order;
1669 static int slub_max_order = DEFAULT_MAX_ORDER;
1670 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1673 * Merge control. If this is set then no merging of slab caches will occur.
1674 * (Could be removed. This was introduced to pacify the merge skeptics.)
1676 static int slub_nomerge;
1679 * Calculate the order of allocation given a slab object size.
1681 * The order of allocation has significant impact on performance and other
1682 * system components. Generally order 0 allocations should be preferred since
1683 * order 0 does not cause fragmentation in the page allocator. Larger objects
1684 * can be problematic to put into order 0 slabs because there may be too much
1685 * unused space left. We go to a higher order if more than 1/8th of the slab
 * would be wasted.
1688 * In order to reach satisfactory performance we must ensure that a minimum
1689 * number of objects is in one slab. Otherwise we may generate too much
1690 * activity on the partial lists which requires taking the list_lock. This is
1691 * less a concern for large slabs though which are rarely used.
1693 * slub_max_order specifies the order where we begin to stop considering the
1694 * number of objects in a slab as critical. If we reach slub_max_order then
1695 * we try to keep the page order as low as possible. So we accept more waste
1696 * of space in favor of a small page order.
1698 * Higher order allocations also allow the placement of more objects in a
1699 * slab and thereby reduce object handling overhead. If the user has
1700 * requested a higher minimum order then we start with that one instead of
1701 * the smallest order which will fit the object.
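 *
 * Worked example (illustrative, assumes 4K pages and the defaults
 * slub_min_order = 0, slub_min_objects = 4, slub_max_order = 1): for
 * size = 704 the loop below starts at order 0, where 4096 % 704 = 576
 * bytes would be wasted (more than 1/8th of the slab), so it moves on to
 * order 1, which fits 11 objects and wastes only 448 of 8192 bytes.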
1703 static inline int slab_order(int size
, int min_objects
,
1704 int max_order
, int fract_leftover
)
1709 for (order
= max(slub_min_order
,
1710 fls(min_objects
* size
- 1) - PAGE_SHIFT
);
1711 order
<= max_order
; order
++) {
1713 unsigned long slab_size
= PAGE_SIZE
<< order
;
1715 if (slab_size
< min_objects
* size
)
1718 rem
= slab_size
% size
;
1720 if (rem
<= slab_size
/ fract_leftover
)
1728 static inline int calculate_order(int size
)
1735 * Attempt to find best configuration for a slab. This
1736 * works by first attempting to generate a layout with
1737 * the best configuration and backing off gradually.
1739 * First we reduce the acceptable waste in a slab. Then
1740 * we reduce the minimum objects required in a slab.
1742 min_objects
= slub_min_objects
;
1743 while (min_objects
> 1) {
1745 while (fraction
>= 4) {
1746 order
= slab_order(size
, min_objects
,
1747 slub_max_order
, fraction
);
1748 if (order
<= slub_max_order
)
1756 * We were unable to place multiple objects in a slab. Now
1757 * let's see if we can place a single object there.
1759 order
= slab_order(size
, 1, slub_max_order
, 1);
1760 if (order
<= slub_max_order
)
1764 * Doh this slab cannot be placed using slub_max_order.
1766 order
= slab_order(size
, 1, MAX_ORDER
, 1);
1767 if (order
<= MAX_ORDER
)
1773 * Figure out what the alignment of the objects will be.
1775 static unsigned long calculate_alignment(unsigned long flags,
1776 		unsigned long align, unsigned long size)
1779 * If the user wants hardware cache aligned objects then
1780 * follow that suggestion if the object is sufficiently
 * large.
1783 * The hardware cache alignment cannot override the
1784 * specified alignment though. If that is greater
 * then use it.
1787 	if ((flags & SLAB_HWCACHE_ALIGN) &&
1788 			size > cache_line_size() / 2)
1789 		return max_t(unsigned long, align, cache_line_size());
1791 	if (align < ARCH_SLAB_MINALIGN)
1792 		return ARCH_SLAB_MINALIGN;
1794 	return ALIGN(align, sizeof(void *));
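/*
 * Example (illustrative): with SLAB_HWCACHE_ALIGN, a 64 byte cache line and
 * a 192 byte object the function above returns 64. A 24 byte object is not
 * bigger than half a cache line, so it falls through and keeps the requested
 * alignment, subject to ARCH_SLAB_MINALIGN and rounding up to a word.
 */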
1797 static void init_kmem_cache_node(struct kmem_cache_node
*n
)
1800 atomic_long_set(&n
->nr_slabs
, 0);
1801 spin_lock_init(&n
->list_lock
);
1802 INIT_LIST_HEAD(&n
->partial
);
1803 INIT_LIST_HEAD(&n
->full
);
1808 * No kmalloc_node yet so do it by hand. We know that this is the first
1809 * slab on the node for this slabcache. There are no concurrent accesses
 * possible.
1812 * Note that this function only works on the kmalloc_node_cache
1813 * when allocating for the kmalloc_node_cache.
1815 static struct kmem_cache_node
* __init
early_kmem_cache_node_alloc(gfp_t gfpflags
,
1819 struct kmem_cache_node
*n
;
1821 BUG_ON(kmalloc_caches
->size
< sizeof(struct kmem_cache_node
));
1823 page
= new_slab(kmalloc_caches
, gfpflags
| GFP_THISNODE
, node
);
1828 page
->freelist
= get_freepointer(kmalloc_caches
, n
);
1830 kmalloc_caches
->node
[node
] = n
;
1831 setup_object_debug(kmalloc_caches
, page
, n
);
1832 init_kmem_cache_node(n
);
1833 atomic_long_inc(&n
->nr_slabs
);
1834 add_partial(n
, page
);
1837 * new_slab() disables interrupts. If we do not reenable interrupts here
1838 * then bootup would continue with interrupts disabled.
1844 static void free_kmem_cache_nodes(struct kmem_cache
*s
)
1848 for_each_online_node(node
) {
1849 struct kmem_cache_node
*n
= s
->node
[node
];
1850 if (n
&& n
!= &s
->local_node
)
1851 kmem_cache_free(kmalloc_caches
, n
);
1852 s
->node
[node
] = NULL
;
1856 static int init_kmem_cache_nodes(struct kmem_cache
*s
, gfp_t gfpflags
)
1861 if (slab_state
>= UP
)
1862 local_node
= page_to_nid(virt_to_page(s
));
1866 for_each_online_node(node
) {
1867 struct kmem_cache_node
*n
;
1869 if (local_node
== node
)
1872 if (slab_state
== DOWN
) {
1873 n
= early_kmem_cache_node_alloc(gfpflags
,
1877 n
= kmem_cache_alloc_node(kmalloc_caches
,
1881 free_kmem_cache_nodes(s
);
1887 init_kmem_cache_node(n
);
1892 static void free_kmem_cache_nodes(struct kmem_cache
*s
)
1896 static int init_kmem_cache_nodes(struct kmem_cache
*s
, gfp_t gfpflags
)
1898 init_kmem_cache_node(&s
->local_node
);
1904 * calculate_sizes() determines the order and the distribution of data within
1907 static int calculate_sizes(struct kmem_cache
*s
)
1909 unsigned long flags
= s
->flags
;
1910 unsigned long size
= s
->objsize
;
1911 unsigned long align
= s
->align
;
1914 * Determine if we can poison the object itself. If the user of
1915 * the slab may touch the object after free or before allocation
1916 * then we should never poison the object itself.
1918 if ((flags
& SLAB_POISON
) && !(flags
& SLAB_DESTROY_BY_RCU
) &&
1920 s
->flags
|= __OBJECT_POISON
;
1922 s
->flags
&= ~__OBJECT_POISON
;
1925 * Round up object size to the next word boundary. We can only
1926 * place the free pointer at word boundaries and this determines
1927 * the possible location of the free pointer.
1929 size
= ALIGN(size
, sizeof(void *));
1931 #ifdef CONFIG_SLUB_DEBUG
1933 * If we are Redzoning then check if there is some space between the
1934 * end of the object and the free pointer. If not then add an
1935 * additional word to have some bytes to store Redzone information.
1937 if ((flags
& SLAB_RED_ZONE
) && size
== s
->objsize
)
1938 size
+= sizeof(void *);
1942 * With that we have determined the number of bytes in actual use
1943 * by the object. This is the potential offset to the free pointer.
1947 if (((flags
& (SLAB_DESTROY_BY_RCU
| SLAB_POISON
)) ||
1950 * Relocate free pointer after the object if it is not
1951 * permitted to overwrite the first word of the object on
 * kmem_cache_free.
1954 * This is the case if we do RCU, have a constructor or
1955 * destructor or are poisoning the objects.
1958 size
+= sizeof(void *);
1961 #ifdef CONFIG_SLUB_DEBUG
1962 if (flags
& SLAB_STORE_USER
)
1964 * Need to store information about allocs and frees after
 * the object.
1967 size
+= 2 * sizeof(struct track
);
1969 if (flags
& SLAB_RED_ZONE
)
1971 * Add some empty padding so that we can catch
1972 * overwrites from earlier objects rather than let
1973 * tracking information or the free pointer be
1974 * corrupted if a user writes before the start
 * of the object.
1977 size
+= sizeof(void *);
1981 * Determine the alignment based on various parameters that the
1982 * user specified and the dynamic determination of cache line size
1985 align
= calculate_alignment(flags
, align
, s
->objsize
);
1988 * SLUB stores one object immediately after another beginning from
1989 * offset 0. In order to align the objects we have to simply size
1990 * each object to conform to the alignment.
1992 size
= ALIGN(size
, align
);
1995 s
->order
= calculate_order(size
);
2000 * Determine the number of objects per slab
2002 s
->objects
= (PAGE_SIZE
<< s
->order
) / size
;
2005 * Verify that the number of objects is within permitted limits.
2006 * The page->inuse field is only 16 bit wide! So we cannot have
2007 * more than 64k objects per slab.
2009 if (!s
->objects
|| s
->objects
> 65535)
2015 static int kmem_cache_open(struct kmem_cache
*s
, gfp_t gfpflags
,
2016 const char *name
, size_t size
,
2017 size_t align
, unsigned long flags
,
2018 void (*ctor
)(void *, struct kmem_cache
*, unsigned long))
2020 memset(s
, 0, kmem_size
);
2026 kmem_cache_open_debug_check(s
);
2028 if (!calculate_sizes(s
))
2033 s
->defrag_ratio
= 100;
2036 if (init_kmem_cache_nodes(s
, gfpflags
& ~SLUB_DMA
))
2039 if (flags
& SLAB_PANIC
)
2040 panic("Cannot create slab %s size=%lu realsize=%u "
2041 "order=%u offset=%u flags=%lx\n",
2042 s
->name
, (unsigned long)size
, s
->size
, s
->order
,
2048 * Check if a given pointer is valid
2050 int kmem_ptr_validate(struct kmem_cache
*s
, const void *object
)
2054 page
= get_object_page(object
);
2056 if (!page
|| s
!= page
->slab
)
2057 /* No slab or wrong slab */
2060 if (!check_valid_pointer(s
, page
, object
))
2064 * We could also check if the object is on the slab's freelist.
2065 * But this would be too expensive and it seems that the main
2066 * purpose of kmem_ptr_valid is to check if the object belongs
2067 * to a certain slab.
2071 EXPORT_SYMBOL(kmem_ptr_validate
);
2074 * Determine the size of a slab object
2076 unsigned int kmem_cache_size(struct kmem_cache *s)
2080 EXPORT_SYMBOL(kmem_cache_size);
2082 const char *kmem_cache_name(struct kmem_cache *s)
2086 EXPORT_SYMBOL(kmem_cache_name);
2089 * Attempt to free all slabs on a node. Return the number of slabs we
2090 * were unable to free.
2092 static int free_list(struct kmem_cache
*s
, struct kmem_cache_node
*n
,
2093 struct list_head
*list
)
2095 int slabs_inuse
= 0;
2096 unsigned long flags
;
2097 struct page
*page
, *h
;
2099 spin_lock_irqsave(&n
->list_lock
, flags
);
2100 list_for_each_entry_safe(page
, h
, list
, lru
)
2102 list_del(&page
->lru
);
2103 discard_slab(s
, page
);
2106 spin_unlock_irqrestore(&n
->list_lock
, flags
);
2111 * Release all resources used by a slab cache.
2113 static int kmem_cache_close(struct kmem_cache
*s
)
2119 /* Attempt to free all objects */
2120 for_each_online_node(node
) {
2121 struct kmem_cache_node
*n
= get_node(s
, node
);
2123 n
->nr_partial
-= free_list(s
, n
, &n
->partial
);
2124 if (atomic_long_read(&n
->nr_slabs
))
2127 free_kmem_cache_nodes(s
);
2132 * Close a cache and release the kmem_cache structure
2133 * (must be used for caches created using kmem_cache_create)
2135 void kmem_cache_destroy(struct kmem_cache
*s
)
2137 down_write(&slub_lock
);
2141 if (kmem_cache_close(s
))
2143 sysfs_slab_remove(s
);
2146 up_write(&slub_lock
);
2148 EXPORT_SYMBOL(kmem_cache_destroy
);
2150 /********************************************************************
2152 *******************************************************************/
2154 struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
2155 EXPORT_SYMBOL(kmalloc_caches);
2157 #ifdef CONFIG_ZONE_DMA
2158 static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
2161 static int __init setup_slub_min_order(char *str)
2163 	get_option(&str, &slub_min_order);
2168 __setup("slub_min_order=", setup_slub_min_order);
2170 static int __init setup_slub_max_order(char *str)
2172 	get_option(&str, &slub_max_order);
2177 __setup("slub_max_order=", setup_slub_max_order);
2179 static int __init setup_slub_min_objects(char *str)
2181 	get_option(&str, &slub_min_objects);
2186 __setup("slub_min_objects=", setup_slub_min_objects);
2188 static int __init setup_slub_nomerge(char *str)
2194 __setup("slub_nomerge", setup_slub_nomerge);
2196 static struct kmem_cache
*create_kmalloc_cache(struct kmem_cache
*s
,
2197 const char *name
, int size
, gfp_t gfp_flags
)
2199 unsigned int flags
= 0;
2201 if (gfp_flags
& SLUB_DMA
)
2202 flags
= SLAB_CACHE_DMA
;
2204 down_write(&slub_lock
);
2205 if (!kmem_cache_open(s
, gfp_flags
, name
, size
, ARCH_KMALLOC_MINALIGN
,
2209 list_add(&s
->list
, &slab_caches
);
2210 up_write(&slub_lock
);
2211 if (sysfs_slab_add(s
))
2216 panic("Creation of kmalloc slab %s size=%d failed.\n", name
, size
);
2219 static struct kmem_cache
*get_slab(size_t size
, gfp_t flags
)
2221 int index
= kmalloc_index(size
);
2226 /* Allocation too large? */
2229 #ifdef CONFIG_ZONE_DMA
2230 if ((flags
& SLUB_DMA
)) {
2231 struct kmem_cache
*s
;
2232 struct kmem_cache
*x
;
2236 s
= kmalloc_caches_dma
[index
];
2240 /* Dynamically create dma cache */
2241 x
= kmalloc(kmem_size
, flags
& ~SLUB_DMA
);
2243 panic("Unable to allocate memory for dma cache\n");
2245 if (index
<= KMALLOC_SHIFT_HIGH
)
2246 realsize
= 1 << index
;
2254 text
= kasprintf(flags
& ~SLUB_DMA
, "kmalloc_dma-%d",
2255 (unsigned int)realsize
);
2256 s
= create_kmalloc_cache(x
, text
, realsize
, flags
);
2257 kmalloc_caches_dma
[index
] = s
;
2261 return &kmalloc_caches
[index
];
2264 void *__kmalloc(size_t size, gfp_t flags)
2266 	struct kmem_cache *s = get_slab(size, flags);
2269 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
2270 	return ZERO_SIZE_PTR;
2272 EXPORT_SYMBOL(__kmalloc);
2275 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2277 	struct kmem_cache *s = get_slab(size, flags);
2280 		return slab_alloc(s, flags, node, __builtin_return_address(0));
2281 	return ZERO_SIZE_PTR;
2283 EXPORT_SYMBOL(__kmalloc_node);
2286 size_t ksize(const void *object
)
2289 struct kmem_cache
*s
;
2291 if (object
== ZERO_SIZE_PTR
)
2294 page
= get_object_page(object
);
2300 * Debugging requires use of the padding between object
2301 * and whatever may come after it.
2303 if (s
->flags
& (SLAB_RED_ZONE
| SLAB_POISON
))
2307 * If we have the need to store the freelist pointer
2308 * back there or track user information then we can
2309 * only use the space before that information.
2311 if (s
->flags
& (SLAB_DESTROY_BY_RCU
| SLAB_STORE_USER
))
2315 * Else we can use all the padding etc for the allocation
2319 EXPORT_SYMBOL(ksize
);
2321 void kfree(const void *x
)
2323 struct kmem_cache
*s
;
2327 * This has to be an unsigned comparison. According to Linus
2328 * some gcc versions treat a pointer as a signed entity. Then
2329 * this comparison would be true for all "negative" pointers
2330 * (which would cover the whole upper half of the address space).
2332 if ((unsigned long)x
<= (unsigned long)ZERO_SIZE_PTR
)
2335 page
= virt_to_head_page(x
);
2338 slab_free(s
, page
, (void *)x
, __builtin_return_address(0));
2340 EXPORT_SYMBOL(kfree
);
2343 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2344 * the remaining slabs by the number of items in use. The slabs with the
2345 * most items in use come first. New allocations will then fill those up
2346 * and thus they can be removed from the partial lists.
2348 * The slabs with the least items are placed last. This results in them
2349 * being allocated from last increasing the chance that the last objects
2350 * are freed in them.
2352 int kmem_cache_shrink(struct kmem_cache
*s
)
2356 struct kmem_cache_node
*n
;
2359 struct list_head
*slabs_by_inuse
=
2360 kmalloc(sizeof(struct list_head
) * s
->objects
, GFP_KERNEL
);
2361 unsigned long flags
;
2363 if (!slabs_by_inuse
)
2367 for_each_online_node(node
) {
2368 n
= get_node(s
, node
);
2373 for (i
= 0; i
< s
->objects
; i
++)
2374 INIT_LIST_HEAD(slabs_by_inuse
+ i
);
2376 spin_lock_irqsave(&n
->list_lock
, flags
);
2379 * Build lists indexed by the items in use in each slab.
2381 * Note that concurrent frees may occur while we hold the
2382 * list_lock. page->inuse here is the upper limit.
2384 list_for_each_entry_safe(page
, t
, &n
->partial
, lru
) {
2385 if (!page
->inuse
&& slab_trylock(page
)) {
2387 * Must hold slab lock here because slab_free
2388 * may have freed the last object and be
2389 * waiting to release the slab.
2391 list_del(&page
->lru
);
2394 discard_slab(s
, page
);
2396 if (n
->nr_partial
> MAX_PARTIAL
)
2397 list_move(&page
->lru
,
2398 slabs_by_inuse
+ page
->inuse
);
2402 if (n
->nr_partial
<= MAX_PARTIAL
)
2406 * Rebuild the partial list with the slabs filled up most
2407 * first and the least used slabs at the end.
2409 for (i
= s
->objects
- 1; i
>= 0; i
--)
2410 list_splice(slabs_by_inuse
+ i
, n
->partial
.prev
);
2413 spin_unlock_irqrestore(&n
->list_lock
, flags
);
2416 kfree(slabs_by_inuse
);
2419 EXPORT_SYMBOL(kmem_cache_shrink
);
2422 * krealloc - reallocate memory. The contents will remain unchanged.
2423 * @p: object to reallocate memory for.
2424 * @new_size: how many bytes of memory are required.
2425 * @flags: the type of memory to allocate.
2427 * The contents of the object pointed to are preserved up to the
2428 * lesser of the new and old sizes. If @p is %NULL, krealloc()
2429 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
2430 * %NULL pointer, the object pointed to is freed.
2432 void *krealloc(const void *p
, size_t new_size
, gfp_t flags
)
2437 if (unlikely(!p
|| p
== ZERO_SIZE_PTR
))
2438 return kmalloc(new_size
, flags
);
2440 if (unlikely(!new_size
)) {
2442 return ZERO_SIZE_PTR
;
2449 ret
= kmalloc(new_size
, flags
);
2451 memcpy(ret
, p
, min(new_size
, ks
));
2456 EXPORT_SYMBOL(krealloc
);
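/*
 * Editorial example (not part of slub.c): typical krealloc() usage when
 * growing a buffer. krealloc() may return a new pointer and, on failure,
 * returns NULL while leaving the old buffer intact, so the result must not
 * overwrite the only copy of the old pointer. Names are hypothetical.
 */
#if 0	/* illustrative example, not compiled */
static int grow_buffer(char **bufp, size_t new_size)
{
	char *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is still valid and unchanged */
	*bufp = tmp;
	return 0;
}
#endif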
/********************************************************************
 *			Basic setup of slabs
 *******************************************************************/

void __init kmem_cache_init(void)
{
	int i;
	int caches = 0;

#ifdef CONFIG_NUMA
	/*
	 * Must first have the slab cache available for the allocations of the
	 * struct kmem_cache_node's. There is special bootstrap code in
	 * kmem_cache_open for slab_state == DOWN.
	 */
	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
		sizeof(struct kmem_cache_node), GFP_KERNEL);
	kmalloc_caches[0].refcount = -1;
	caches++;
#endif

	/* Able to allocate the per node structures */
	slab_state = PARTIAL;

	/* Caches that are not of a power-of-two size */
	if (KMALLOC_MIN_SIZE <= 64) {
		create_kmalloc_cache(&kmalloc_caches[1],
				"kmalloc-96", 96, GFP_KERNEL);
		caches++;
	}
	if (KMALLOC_MIN_SIZE <= 128) {
		create_kmalloc_cache(&kmalloc_caches[2],
				"kmalloc-192", 192, GFP_KERNEL);
		caches++;
	}

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		create_kmalloc_cache(&kmalloc_caches[i],
			"kmalloc", 1 << i, GFP_KERNEL);
		caches++;
	}

	slab_state = UP;

	/* Provide the correct kmalloc names now that the caches are up */
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		kmalloc_caches[i].name =
			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);

#ifdef CONFIG_SMP
	register_cpu_notifier(&slab_notifier);
#endif

	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
				nr_cpu_ids * sizeof(struct page *);

	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
		" CPUs=%d, Nodes=%d\n",
		caches, cache_line_size(),
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
}
/*
 * Find a mergeable slab cache
 */
static int slab_unmergeable(struct kmem_cache *s)
{
	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

static struct kmem_cache *find_mergeable(size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
	struct list_head *h;

	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);

	list_for_each(h, &slab_caches) {
		struct kmem_cache *s =
			container_of(h, struct kmem_cache, list);

		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
			(s->flags & SLUB_MERGE_SAME))
				continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}
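/*
 * Editorial note (not part of slub.c): the alignment check above relies on
 * "align" being a power of two, as calculate_alignment() guarantees, so
 * clearing the low bits with ~(align - 1) and comparing against the original
 * value is equivalent to testing size % align == 0. A minimal sketch of the
 * equivalence; the helper name is hypothetical.
 */
#if 0	/* illustrative example, not compiled */
static int alignment_compatible(unsigned long size, unsigned long align)
{
	/* align must be a power of two */
	return (size & ~(align - 1)) == size;	/* i.e. size % align == 0 */
}
#endif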
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *, struct kmem_cache *, unsigned long),
		void (*dtor)(void *, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *s;

	down_write(&slub_lock);
	s = find_mergeable(size, align, flags, ctor);
	if (s) {
		s->refcount++;
		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		s->objsize = max(s->objsize, (int)size);
		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
		if (sysfs_slab_alias(s, name))
			goto err;
	} else {
		s = kmalloc(kmem_size, GFP_KERNEL);
		if (s && kmem_cache_open(s, GFP_KERNEL, name,
				size, align, flags, ctor)) {
			if (sysfs_slab_add(s)) {
				kfree(s);
				goto err;
			}
			list_add(&s->list, &slab_caches);
		} else
			kfree(s);
	}
	up_write(&slub_lock);
	return s;

err:
	up_write(&slub_lock);
	if (flags & SLAB_PANIC)
		panic("Cannot create slabcache %s\n", name);
	else
		s = NULL;
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
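/*
 * Editorial example (not part of slub.c): creating a dedicated cache with
 * this era's kmem_cache_create() signature (constructor and destructor
 * pointers, both NULL here) and tearing it down again. Because of
 * find_mergeable() above, a cache like this may silently be merged with an
 * existing compatible cache. struct my_object and my_cache are hypothetical.
 */
#if 0	/* illustrative example, not compiled */
struct my_object {
	int id;
	struct list_head list;
};

static struct kmem_cache *my_cache;

static int my_cache_setup(void)
{
	my_cache = kmem_cache_create("my_object", sizeof(struct my_object),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!my_cache)
		return -ENOMEM;
	return 0;
}

static void my_cache_teardown(void)
{
	kmem_cache_destroy(my_cache);
}
#endif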
void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
{
	void *x;

	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
	if (x)
		memset(x, 0, s->objsize);
	return x;
}
EXPORT_SYMBOL(kmem_cache_zalloc);
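/*
 * Editorial example (not part of slub.c): kmem_cache_zalloc() pairs with
 * kmem_cache_free() and hands back an object cleared to s->objsize bytes,
 * so no explicit memset() is needed. The names continue the hypothetical
 * my_cache sketch above.
 */
#if 0	/* illustrative example, not compiled */
static struct my_object *my_object_alloc(void)
{
	struct my_object *obj = kmem_cache_zalloc(my_cache, GFP_KERNEL);

	if (obj)
		INIT_LIST_HEAD(&obj->list);	/* obj->id is already zero */
	return obj;
}

static void my_object_free(struct my_object *obj)
{
	kmem_cache_free(my_cache, obj);
}
#endif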
static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
{
	struct list_head *h;

	down_read(&slub_lock);
	list_for_each(h, &slab_caches) {
		struct kmem_cache *s =
			container_of(h, struct kmem_cache, list);

		func(s, cpu);
	}
	up_read(&slub_lock);
}

/*
 * Version of __flush_cpu_slab for the case that interrupts
 * are enabled.
 */
static void cpu_slab_flush(struct kmem_cache *s, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_cpu_slab(s, cpu);
	local_irq_restore(flags);
}

/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
 * necessary.
 */
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		for_all_slabs(cpu_slab_flush, cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata slab_notifier =
	{ &slab_cpuup_callback, NULL, 0 };
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
	struct kmem_cache *s = get_slab(size, gfpflags);

	if (!s)
		return ZERO_SIZE_PTR;

	return slab_alloc(s, gfpflags, -1, caller);
}

void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
					int node, void *caller)
{
	struct kmem_cache *s = get_slab(size, gfpflags);

	if (!s)
		return ZERO_SIZE_PTR;

	return slab_alloc(s, gfpflags, node, caller);
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page)
{
	void *p;
	void *addr = page_address(page);
	DECLARE_BITMAP(map, s->objects);

	if (!check_slab(s, page) ||
			!on_freelist(s, page, NULL))
		return 0;

	/* Now we know that a valid freelist exists */
	bitmap_zero(map, s->objects);

	for_each_free_object(p, s, page->freelist) {
		set_bit(slab_index(p, s, addr), map);
		if (!check_object(s, page, p, 0))
			return 0;
	}

	for_each_object(p, s, addr)
		if (!test_bit(slab_index(p, s, addr), map))
			if (!check_object(s, page, p, 1))
				return 0;
	return 1;
}

static void validate_slab_slab(struct kmem_cache *s, struct page *page)
{
	if (slab_trylock(page)) {
		validate_slab(s, page);
		slab_unlock(page);
	} else
		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
			s->name, page);

	if (s->flags & DEBUG_DEFAULT_FLAGS) {
		if (!SlabDebug(page))
			printk(KERN_ERR "SLUB %s: SlabDebug not set "
				"on slab 0x%p\n", s->name, page);
	} else {
		if (SlabDebug(page))
			printk(KERN_ERR "SLUB %s: SlabDebug set on "
				"slab 0x%p\n", s->name, page);
	}
}

static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
{
	unsigned long count = 0;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(page, &n->partial, lru) {
		validate_slab_slab(s, page);
		count++;
	}
	if (count != n->nr_partial)
		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
			"counter=%ld\n", s->name, count, n->nr_partial);

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

	list_for_each_entry(page, &n->full, lru) {
		validate_slab_slab(s, page);
		count++;
	}
	if (count != atomic_long_read(&n->nr_slabs))
		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
			"counter=%ld\n", s->name, count,
			atomic_long_read(&n->nr_slabs));

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}

static unsigned long validate_slab_cache(struct kmem_cache *s)
{
	int node;
	unsigned long count = 0;

	flush_all(s);
	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		count += validate_slab_node(s, n);
	}
	return count;
}
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
	u8 *p;

	printk(KERN_ERR "SLUB resiliency testing\n");
	printk(KERN_ERR "-----------------------\n");
	printk(KERN_ERR "A. Corruption after allocation\n");

	p = kzalloc(16, GFP_KERNEL);
	p[16] = 0x12;
	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
			" 0x12->0x%p\n\n", p + 16);

	validate_slab_cache(kmalloc_caches + 4);

	/* Hmmm... The next two are dangerous */
	p = kzalloc(32, GFP_KERNEL);
	p[32 + sizeof(void *)] = 0x34;
	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
			" 0x34 -> -0x%p\n", p);
	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");

	validate_slab_cache(kmalloc_caches + 5);
	p = kzalloc(64, GFP_KERNEL);
	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
	*p = 0x56;
	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
									p);
	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
	validate_slab_cache(kmalloc_caches + 6);

	printk(KERN_ERR "\nB. Corruption after free\n");
	p = kzalloc(128, GFP_KERNEL);
	kfree(p);
	*p = 0x78;
	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 7);

	p = kzalloc(256, GFP_KERNEL);
	kfree(p);
	p[50] = 0x9a;
	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 8);

	p = kzalloc(512, GFP_KERNEL);
	kfree(p);
	p[512] = 0xab;
	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 9);
}
#else
static void resiliency_test(void) {};
#endif
/*
 * Generate lists of code addresses where slabcache objects are allocated
 * and freed.
 */

struct location {
	unsigned long count;
	void *addr;
	long long sum_time;
	long min_time;
	long max_time;
	long min_pid;
	long max_pid;
	cpumask_t cpus;
	nodemask_t nodes;
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
};

static void free_loc_track(struct loc_track *t)
{
	if (t->max)
		free_pages((unsigned long)t->loc,
			get_order(sizeof(struct location) * t->max));
}

static int alloc_loc_track(struct loc_track *t, unsigned long max)
{
	struct location *l;
	int order;

	if (!max)
		max = PAGE_SIZE / sizeof(struct location);

	order = get_order(sizeof(struct location) * max);

	l = (void *)__get_free_pages(GFP_ATOMIC, order);

	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}
static int add_location(struct loc_track *t, struct kmem_cache *s,
				const struct track *track)
{
	long start, end, pos;
	struct location *l;
	void *caddr;
	unsigned long age = jiffies - track->when;

	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * then we need to add something before end.
		 */
		if (pos == end)
			break;

		caddr = t->loc[pos].addr;
		if (track->addr == caddr) {
			l = &t->loc[pos];
			l->count++;
			l->sum_time += age;
			if (age < l->min_time)
				l->min_time = age;
			if (age > l->max_time)
				l->max_time = age;

			if (track->pid < l->min_pid)
				l->min_pid = track->pid;
			if (track->pid > l->max_pid)
				l->max_pid = track->pid;

			cpu_set(track->cpu, l->cpus);
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
			return 1;
		}

		if (track->addr < caddr)
			end = pos;
		else
			start = pos;
	}

	/*
	 * Not found. Insert new tracking element.
	 */
	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
	cpus_clear(l->cpus);
	cpu_set(track->cpu, l->cpus);
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
	return 1;
}
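/*
 * Editorial note (not part of slub.c): add_location() keeps t->loc sorted by
 * address and uses a binary search with the midpoint
 * start + (end - start + 1) / 2 to find either an existing entry to update
 * or the slot at which to insert a new one. A stripped-down sketch of that
 * search over a plain array of addresses; find_slot is a hypothetical name.
 */
#if 0	/* illustrative example, not compiled */
static long find_slot(void **addrs, long count, void *addr)
{
	long start = -1;
	long end = count;
	long pos;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;
		if (pos == end)
			return pos;		/* not found: insert here */
		if (addrs[pos] == addr)
			return pos;		/* found: update in place */
		if (addr < addrs[pos])
			end = pos;
		else
			start = pos;
	}
}
#endif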
static void process_slab(struct loc_track *t, struct kmem_cache *s,
		struct page *page, enum track_item alloc)
{
	void *addr = page_address(page);
	DECLARE_BITMAP(map, s->objects);
	void *p;

	bitmap_zero(map, s->objects);
	for_each_free_object(p, s, page->freelist)
		set_bit(slab_index(p, s, addr), map);

	for_each_object(p, s, addr)
		if (!test_bit(slab_index(p, s, addr), map))
			add_location(t, s, get_track(s, p, alloc));
}
static int list_locations(struct kmem_cache *s, char *buf,
					enum track_item alloc)
{
	int n = 0;
	unsigned long i;
	struct loc_track t;
	int node;

	t.count = 0;
	t.max = 0;

	/* Push back cpu slabs */
	flush_all(s);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);
		unsigned long flags;
		struct page *page;

		if (!atomic_read(&n->nr_slabs))
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru)
			process_slab(&t, s, page, alloc);
		list_for_each_entry(page, &n->full, lru)
			process_slab(&t, s, page, alloc);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	for (i = 0; i < t.count; i++) {
		struct location *l = &t.loc[i];

		if (n > PAGE_SIZE - 100)
			break;
		n += sprintf(buf + n, "%7ld ", l->count);

		if (l->addr)
			n += sprint_symbol(buf + n, (unsigned long)l->addr);
		else
			n += sprintf(buf + n, "<not-available>");

		if (l->sum_time != l->min_time) {
			unsigned long remainder;

			n += sprintf(buf + n, " age=%ld/%ld/%ld",
				l->min_time,
				div_long_long_rem(l->sum_time, l->count, &remainder),
				l->max_time);
		} else
			n += sprintf(buf + n, " age=%ld",
				l->min_time);

		if (l->min_pid != l->max_pid)
			n += sprintf(buf + n, " pid=%ld-%ld",
				l->min_pid, l->max_pid);
		else
			n += sprintf(buf + n, " pid=%ld",
				l->min_pid);

		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
				n < PAGE_SIZE - 60) {
			n += sprintf(buf + n, " cpus=");
			n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
					l->cpus);
		}

		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
				n < PAGE_SIZE - 60) {
			n += sprintf(buf + n, " nodes=");
			n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
					l->nodes);
		}

		n += sprintf(buf + n, "\n");
	}

	free_loc_track(&t);
	if (!t.count)
		n += sprintf(buf, "No data\n");
	return n;
}
static unsigned long count_partial(struct kmem_cache_node *n)
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, lru)
		x += page->inuse;
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}
enum slab_stat_type {
	SL_FULL,
	SL_PARTIAL,
	SL_CPU,
	SL_OBJECTS
};

#define SO_FULL		(1 << SL_FULL)
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)

static unsigned long slab_objects(struct kmem_cache *s,
			char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int cpu;
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	per_cpu = nodes + nr_node_ids;

	for_each_possible_cpu(cpu) {
		struct page *page = s->cpu_slab[cpu];

		if (page) {
			node = page_to_nid(page);
			if (flags & SO_CPU) {
				if (flags & SO_OBJECTS)
					x = page->inuse;
				else
					x = 1;
				total += x;
				nodes[node] += x;
			}
			per_cpu[node]++;
		}
	}

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (flags & SO_PARTIAL) {
			if (flags & SO_OBJECTS)
				x = count_partial(n);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}

		if (flags & SO_FULL) {
			int full_slabs = atomic_read(&n->nr_slabs)
					- per_cpu[node]
					- n->nr_partial;

			if (flags & SO_OBJECTS)
				x = full_slabs * s->objects;
			else
				x = full_slabs;
			total += x;
			nodes[node] += x;
		}
	}

	x = sprintf(buf, "%lu", total);
	for_each_online_node(node)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
}
static int any_slab_objects(struct kmem_cache *s)
{
	int node;
	int cpu;

	for_each_possible_cpu(cpu)
		if (s->cpu_slab[cpu])
			return 1;

	for_each_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (n->nr_partial || atomic_read(&n->nr_slabs))
			return 1;
	}
	return 0;
}

#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj);

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr =  \
	__ATTR(_name, 0644, _name##_show, _name##_store)
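/*
 * Editorial example (not part of slub.c): what a read-only attribute looks
 * like once SLAB_ATTR_RO() is applied. The name "foo" is hypothetical; the
 * real attributes defined below follow exactly this pattern, and each one
 * becomes a file under the slab sysfs directory for the cache.
 */
#if 0	/* illustrative example, not compiled */
static ssize_t foo_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(foo);
/* expands to: static struct slab_attribute foo_attr = __ATTR_RO(foo); */
#endif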
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objects);
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->order);
}
SLAB_ATTR_RO(order);

static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (s->ctor) {
		int n = sprint_symbol(buf, (unsigned long)s->ctor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE;
	if (buf[0] == '1')
		s->flags |= SLAB_DEBUG_FREE;
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1')
		s->flags |= SLAB_TRACE;
	return length;
}
SLAB_ATTR(trace);

static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1')
		s->flags |= SLAB_RED_ZONE;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1')
		s->flags |= SLAB_POISON;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1')
		s->flags |= SLAB_STORE_USER;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(store_user);
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1')
		validate_slab_cache(s);
	else
		return -EINVAL;
	return length;
}
SLAB_ATTR(validate);

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);

static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);

#ifdef CONFIG_NUMA
static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->defrag_ratio / 10);
}

static ssize_t defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	int n = simple_strtoul(buf, NULL, 10);

	if (n < 100)
		s->defrag_ratio = n * 10;
	return length;
}
SLAB_ATTR(defrag_ratio);
#endif
static struct attribute * slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&defrag_ratio_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * :[flags-]size:[memory address of kmemcache]
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
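/*
 * Editorial note (not part of slub.c): given the flag letters above, a
 * reclaim-accounted 192-byte cache would likely get the id ":a-0000192",
 * while a cache with none of the matched flags would get just ":0000192".
 * These strings become the sysfs directory names that the human-readable
 * cache names are symlinked to; the concrete values shown here are
 * illustrative only.
 */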
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_subsys.kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	kobj_set_kset_s(s, slab_subsys);
	kobject_set_name(&s->kobj, name);
	kobject_init(&s->kobj);
	err = kobject_add(&s->kobj);
	if (err)
		return err;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
}
/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_subsys.kobj, name);
		return sysfs_create_link(&slab_subsys.kobj,
						&s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct list_head *h;
	int err;

	err = subsystem_register(&slab_subsys);
	if (err) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each(h, &slab_caches) {
		struct kmem_cache *s =
			container_of(h, struct kmem_cache, list);

		err = sysfs_slab_add(s);
		BUG_ON(err);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		BUG_ON(err);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif	/* CONFIG_SYSFS && CONFIG_SLUB_DEBUG */