Slab allocators: Drop support for destructors
mm/slub.c
1 /*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9 */
11 #include <linux/mm.h>
12 #include <linux/module.h>
13 #include <linux/bit_spinlock.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
17 #include <linux/seq_file.h>
18 #include <linux/cpu.h>
19 #include <linux/cpuset.h>
20 #include <linux/mempolicy.h>
21 #include <linux/ctype.h>
22 #include <linux/kallsyms.h>
25 * Lock order:
26 * 1. slab_lock(page)
27 * 2. slab->list_lock
29 * The slab_lock protects operations on the object of a particular
30 * slab and its metadata in the page struct. If the slab lock
31 * has been taken then no allocations nor frees can be performed
32 * on the objects in the slab nor can the slab be added or removed
33 * from the partial or full lists since this would mean modifying
34 * the page_struct of the slab.
36 * The list_lock protects the partial and full list on each node and
37 * the partial slab counter. If taken then no new slabs may be added or
38 * removed from the lists nor may the number of partial slabs be modified.
39 * (Note that the total number of slabs is an atomic value that may be
40 * modified without taking the list lock).
42 * The list_lock is a centralized lock and thus we avoid taking it as
43 * much as possible. As long as SLUB does not have to handle partial
44 * slabs, operations can continue without any centralized lock. F.e.
45 * allocating a long series of objects that fill up slabs does not require
46 * the list lock.
48 * The lock order is sometimes inverted when we are trying to get a slab
49 * off a list. We take the list_lock and then look for a page on the list
50 * to use. While we do that objects in the slabs may be freed. We can
51 * only operate on the slab if we have also taken the slab_lock. So we use
52 * a slab_trylock() on the slab. If trylock was successful then no frees
53 * can occur anymore and we can use the slab for allocations etc. If the
54 * slab_trylock() does not succeed then frees are in progress in the slab and
55 * we must stay away from it for a while since we may cause a bouncing
56 * cacheline if we try to acquire the lock. So go onto the next slab.
57 * If all pages are busy then we may allocate a new slab instead of reusing
58 * a partial slab. A new slab has no one operating on it and thus there is
59 * no danger of cacheline contention.
61 * Interrupts are disabled during allocation and deallocation in order to
62 * make the slab allocator safe to use in the context of an irq. In addition
63 * interrupts are disabled to ensure that the processor does not change
64 * while handling per_cpu slabs, due to kernel preemption.
66 * SLUB assigns one slab for allocation to each processor.
67 * Allocations only occur from these slabs called cpu slabs.
69 * Slabs with free elements are kept on a partial list and during regular
70 * operations no list for full slabs is used. If an object in a full slab is
71 * freed then the slab will show up again on the partial lists.
72 * We track full slabs for debugging purposes though because otherwise we
73 * cannot scan all objects.
75 * Slabs are freed when they become empty. Teardown and setup are
76 * minimal so we rely on the page allocator's per cpu caches for
77 * fast frees and allocs.
79 * Overloading of page flags that are otherwise used for LRU management.
81 * PageActive The slab is used as a cpu cache. Allocations
82 * may be performed from the slab. The slab is not
83 * on any slab list and cannot be moved onto one.
84 * The cpu slab may be equipped with an additional
85 * lockless_freelist that allows lockless access to
86 * free objects in addition to the regular freelist
87 * that requires the slab lock.
89 * PageError Slab requires special handling due to debug
90 * options set. This moves slab handling out of
91 * the fast path and disables lockless freelists.
94 static inline int SlabDebug(struct page *page)
96 #ifdef CONFIG_SLUB_DEBUG
97 return PageError(page);
98 #else
99 return 0;
100 #endif
103 static inline void SetSlabDebug(struct page *page)
105 #ifdef CONFIG_SLUB_DEBUG
106 SetPageError(page);
107 #endif
110 static inline void ClearSlabDebug(struct page *page)
112 #ifdef CONFIG_SLUB_DEBUG
113 ClearPageError(page);
114 #endif
118 * Issues still to be resolved:
120 * - The per cpu array is updated for each new slab and is a remote
121 * cacheline for most nodes. This could become a bouncing cacheline given
122 * enough frequent updates. There are 16 pointers in a cacheline, so at
123 * max 16 cpus could compete for the cacheline which may be okay.
125 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
127 * - Variable sizing of the per node arrays
130 /* Enable to test recovery from slab corruption on boot */
131 #undef SLUB_RESILIENCY_TEST
133 #if PAGE_SHIFT <= 12
136 * Small page size. Make sure that we do not fragment memory
138 #define DEFAULT_MAX_ORDER 1
139 #define DEFAULT_MIN_OBJECTS 4
141 #else
144 * Large page machines are customarily able to handle larger
145 * page orders.
147 #define DEFAULT_MAX_ORDER 2
148 #define DEFAULT_MIN_OBJECTS 8
150 #endif
153 * Minimum number of partial slabs. These will be left on the partial
154 * lists even if they are empty. kmem_cache_shrink may reclaim them.
156 #define MIN_PARTIAL 2
159 * Maximum number of desirable partial slabs.
160 * The existence of more partial slabs makes kmem_cache_shrink
161 * sort the partial list by the number of objects in use.
163 #define MAX_PARTIAL 10
165 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
166 SLAB_POISON | SLAB_STORE_USER)
169 * Set of flags that will prevent slab merging
171 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
172 SLAB_TRACE | SLAB_DESTROY_BY_RCU)
174 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
175 SLAB_CACHE_DMA)
177 #ifndef ARCH_KMALLOC_MINALIGN
178 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
179 #endif
181 #ifndef ARCH_SLAB_MINALIGN
182 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
183 #endif
185 /* Internal SLUB flags */
186 #define __OBJECT_POISON 0x80000000 /* Poison object */
188 /* Not all arches define cache_line_size */
189 #ifndef cache_line_size
190 #define cache_line_size() L1_CACHE_BYTES
191 #endif
193 static int kmem_size = sizeof(struct kmem_cache);
195 #ifdef CONFIG_SMP
196 static struct notifier_block slab_notifier;
197 #endif
199 static enum {
200 DOWN, /* No slab functionality available */
201 PARTIAL, /* kmem_cache_open() works but kmalloc does not */
202 UP, /* Everything works but does not show up in sysfs */
203 SYSFS /* Sysfs up */
204 } slab_state = DOWN;
206 /* A list of all slab caches on the system */
207 static DECLARE_RWSEM(slub_lock);
208 LIST_HEAD(slab_caches);
211 * Tracking user of a slab.
213 struct track {
214 void *addr; /* Called from address */
215 int cpu; /* Was running on cpu */
216 int pid; /* Pid context */
217 unsigned long when; /* When did the operation occur */
220 enum track_item { TRACK_ALLOC, TRACK_FREE };
222 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
223 static int sysfs_slab_add(struct kmem_cache *);
224 static int sysfs_slab_alias(struct kmem_cache *, const char *);
225 static void sysfs_slab_remove(struct kmem_cache *);
226 #else
227 static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
228 static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
229 static void sysfs_slab_remove(struct kmem_cache *s) {}
230 #endif
232 /********************************************************************
233 * Core slab cache functions
234 *******************************************************************/
236 int slab_is_available(void)
238 return slab_state >= UP;
241 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
243 #ifdef CONFIG_NUMA
244 return s->node[node];
245 #else
246 return &s->local_node;
247 #endif
250 static inline int check_valid_pointer(struct kmem_cache *s,
251 struct page *page, const void *object)
253 void *base;
255 if (!object)
256 return 1;
258 base = page_address(page);
259 if (object < base || object >= base + s->objects * s->size ||
260 (object - base) % s->size) {
261 return 0;
264 return 1;
268 * Slow version of get and set free pointer.
270 * This version requires touching the cache lines of kmem_cache, which
271 * we avoid doing in the fast alloc/free paths. There we obtain the offset
272 * from the page struct instead.
274 static inline void *get_freepointer(struct kmem_cache *s, void *object)
276 return *(void **)(object + s->offset);
279 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
281 *(void **)(object + s->offset) = fp;
284 /* Loop over all objects in a slab */
285 #define for_each_object(__p, __s, __addr) \
286 for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
287 __p += (__s)->size)
289 /* Scan freelist */
290 #define for_each_free_object(__p, __s, __free) \
291 for (__p = (__free); __p; __p = get_freepointer((__s), __p))
293 /* Determine object index from a given position */
294 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
296 return (p - addr) / s->size;
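
/*
 * Illustrative sketch (not part of slub.c): counting the free objects on
 * a slab with the helpers above. get_freepointer() chases the pointer
 * stored at s->offset inside each free object, so the freelist lives
 * entirely within the free objects themselves. The function name is
 * hypothetical; on_freelist() later in this file performs a hardened
 * version of the same walk.
 */
static int example_count_free(struct kmem_cache *s, struct page *page)
{
	void *p;
	int nr = 0;

	for_each_free_object(p, s, page->freelist)
		nr++;
	return nr;
}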
299 #ifdef CONFIG_SLUB_DEBUG
301 * Debug settings:
303 static int slub_debug;
305 static char *slub_debug_slabs;
308 * Object debugging
310 static void print_section(char *text, u8 *addr, unsigned int length)
312 int i, offset;
313 int newline = 1;
314 char ascii[17];
316 ascii[16] = 0;
318 for (i = 0; i < length; i++) {
319 if (newline) {
320 printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
321 newline = 0;
323 printk(" %02x", addr[i]);
324 offset = i % 16;
325 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
326 if (offset == 15) {
327 printk(" %s\n", ascii);
328 newline = 1;
331 if (!newline) {
332 i %= 16;
333 while (i < 16) {
334 printk("   ");
335 ascii[i] = ' ';
336 i++;
338 printk(" %s\n", ascii);
342 static struct track *get_track(struct kmem_cache *s, void *object,
343 enum track_item alloc)
345 struct track *p;
347 if (s->offset)
348 p = object + s->offset + sizeof(void *);
349 else
350 p = object + s->inuse;
352 return p + alloc;
355 static void set_track(struct kmem_cache *s, void *object,
356 enum track_item alloc, void *addr)
358 struct track *p;
360 if (s->offset)
361 p = object + s->offset + sizeof(void *);
362 else
363 p = object + s->inuse;
365 p += alloc;
366 if (addr) {
367 p->addr = addr;
368 p->cpu = smp_processor_id();
369 p->pid = current ? current->pid : -1;
370 p->when = jiffies;
371 } else
372 memset(p, 0, sizeof(struct track));
375 static void init_tracking(struct kmem_cache *s, void *object)
377 if (s->flags & SLAB_STORE_USER) {
378 set_track(s, object, TRACK_FREE, NULL);
379 set_track(s, object, TRACK_ALLOC, NULL);
383 static void print_track(const char *s, struct track *t)
385 if (!t->addr)
386 return;
388 printk(KERN_ERR "%s: ", s);
389 __print_symbol("%s", (unsigned long)t->addr);
390 printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
393 static void print_trailer(struct kmem_cache *s, u8 *p)
395 unsigned int off; /* Offset of last byte */
397 if (s->flags & SLAB_RED_ZONE)
398 print_section("Redzone", p + s->objsize,
399 s->inuse - s->objsize);
401 printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
402 p + s->offset,
403 get_freepointer(s, p));
405 if (s->offset)
406 off = s->offset + sizeof(void *);
407 else
408 off = s->inuse;
410 if (s->flags & SLAB_STORE_USER) {
411 print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
412 print_track("Last free ", get_track(s, p, TRACK_FREE));
413 off += 2 * sizeof(struct track);
416 if (off != s->size)
417 /* Beginning of the filler is the free pointer */
418 print_section("Filler", p + off, s->size - off);
421 static void object_err(struct kmem_cache *s, struct page *page,
422 u8 *object, char *reason)
424 u8 *addr = page_address(page);
426 printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
427 s->name, reason, object, page);
428 printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
429 object - addr, page->flags, page->inuse, page->freelist);
430 if (object > addr + 16)
431 print_section("Bytes b4", object - 16, 16);
432 print_section("Object", object, min(s->objsize, 128));
433 print_trailer(s, object);
434 dump_stack();
437 static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
439 va_list args;
440 char buf[100];
442 va_start(args, reason);
443 vsnprintf(buf, sizeof(buf), reason, args);
444 va_end(args);
445 printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
446 page);
447 dump_stack();
450 static void init_object(struct kmem_cache *s, void *object, int active)
452 u8 *p = object;
454 if (s->flags & __OBJECT_POISON) {
455 memset(p, POISON_FREE, s->objsize - 1);
456 p[s->objsize - 1] = POISON_END;
459 if (s->flags & SLAB_RED_ZONE)
460 memset(p + s->objsize,
461 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
462 s->inuse - s->objsize);
465 static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
467 while (bytes) {
468 if (*start != (u8)value)
469 return 0;
470 start++;
471 bytes--;
473 return 1;
477 * Object layout:
479 * object address
480 * Bytes of the object to be managed.
481 * If the freepointer may overlay the object then the free
482 * pointer is the first word of the object.
484 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
485 * 0xa5 (POISON_END)
487 * object + s->objsize
488 * Padding to reach word boundary. This is also used for Redzoning.
489 * Padding is extended by another word if Redzoning is enabled and
490 * objsize == inuse.
492 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
493 * 0xcc (RED_ACTIVE) for objects in use.
495 * object + s->inuse
496 * Meta data starts here.
498 * A. Free pointer (if we cannot overwrite object on free)
499 * B. Tracking data for SLAB_STORE_USER
500 * C. Padding to reach required alignment boundary or at minimum
501 * one word if debugging is on to be able to detect writes
502 * before the word boundary.
504 * Padding is done using 0x5a (POISON_INUSE)
506 * object + s->size
507 * Nothing is used beyond s->size.
509 * If slabcaches are merged then the objsize and inuse boundaries are mostly
510 * ignored. And therefore no slab options that rely on these boundaries
511 * may be used with merged slabcaches.
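
/*
 * Worked example (illustrative, assuming a 64-bit machine): a cache with
 * objsize = 24 and SLAB_POISON set (no ctor, no RCU) gets __OBJECT_POISON,
 * so the object may not be overwritten on free and the free pointer is
 * relocated behind it:
 *
 *	object + 0	24 bytes of poisoned object data (s->inuse = 24)
 *	object + 24	free pointer (s->offset = 24)
 *	object + 32	s->size; the next object starts here
 */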
514 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
515 void *from, void *to)
517 printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
518 s->name, message, data, from, to - 1);
519 memset(from, data, to - from);
522 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
524 unsigned long off = s->inuse; /* The end of info */
526 if (s->offset)
527 /* Freepointer is placed after the object. */
528 off += sizeof(void *);
530 if (s->flags & SLAB_STORE_USER)
531 /* We also have user information there */
532 off += 2 * sizeof(struct track);
534 if (s->size == off)
535 return 1;
537 if (check_bytes(p + off, POISON_INUSE, s->size - off))
538 return 1;
540 object_err(s, page, p, "Object padding check fails");
543 * Restore padding
545 restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
546 return 0;
549 static int slab_pad_check(struct kmem_cache *s, struct page *page)
551 u8 *p;
552 int length, remainder;
554 if (!(s->flags & SLAB_POISON))
555 return 1;
557 p = page_address(page);
558 length = s->objects * s->size;
559 remainder = (PAGE_SIZE << s->order) - length;
560 if (!remainder)
561 return 1;
563 if (!check_bytes(p + length, POISON_INUSE, remainder)) {
564 slab_err(s, page, "Padding check failed");
565 restore_bytes(s, "slab padding", POISON_INUSE, p + length,
566 p + length + remainder);
567 return 0;
569 return 1;
572 static int check_object(struct kmem_cache *s, struct page *page,
573 void *object, int active)
575 u8 *p = object;
576 u8 *endobject = object + s->objsize;
578 if (s->flags & SLAB_RED_ZONE) {
579 unsigned int red =
580 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
582 if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
583 object_err(s, page, object,
584 active ? "Redzone Active" : "Redzone Inactive");
585 restore_bytes(s, "redzone", red,
586 endobject, object + s->inuse);
587 return 0;
589 } else {
590 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
591 !check_bytes(endobject, POISON_INUSE,
592 s->inuse - s->objsize)) {
593 object_err(s, page, p, "Alignment padding check fails");
595 * Fix it so that there will not be another report.
597 * Hmmm... We may be corrupting an object that now expects
598 * to be longer than allowed.
600 restore_bytes(s, "alignment padding", POISON_INUSE,
601 endobject, object + s->inuse);
605 if (s->flags & SLAB_POISON) {
606 if (!active && (s->flags & __OBJECT_POISON) &&
607 (!check_bytes(p, POISON_FREE, s->objsize - 1) ||
608 p[s->objsize - 1] != POISON_END)) {
610 object_err(s, page, p, "Poison check failed");
611 restore_bytes(s, "Poison", POISON_FREE,
612 p, p + s->objsize - 1);
613 restore_bytes(s, "Poison", POISON_END,
614 p + s->objsize - 1, p + s->objsize);
615 return 0;
618 * check_pad_bytes cleans up on its own.
620 check_pad_bytes(s, page, p);
623 if (!s->offset && active)
625 * Object and freepointer overlap. Cannot check
626 * freepointer while object is allocated.
628 return 1;
630 /* Check free pointer validity */
631 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
632 object_err(s, page, p, "Freepointer corrupt");
634 * No choice but to zap it and thus lose the remainder
635 * of the free objects in this slab. May cause
636 * another error because the object count is now wrong.
638 set_freepointer(s, p, NULL);
639 return 0;
641 return 1;
644 static int check_slab(struct kmem_cache *s, struct page *page)
646 VM_BUG_ON(!irqs_disabled());
648 if (!PageSlab(page)) {
649 slab_err(s, page, "Not a valid slab page flags=%lx "
650 "mapping=0x%p count=%d", page->flags, page->mapping,
651 page_count(page));
652 return 0;
654 if (page->offset * sizeof(void *) != s->offset) {
655 slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
656 "mapping=0x%p count=%d",
657 (unsigned long)(page->offset * sizeof(void *)),
658 page->flags,
659 page->mapping,
660 page_count(page));
661 return 0;
663 if (page->inuse > s->objects) {
664 slab_err(s, page, "inuse %u > max %u flags=%lx "
665 "mapping=0x%p count=%d",
666 page->inuse, s->objects, page->flags,
667 page->mapping, page_count(page));
668 return 0;
670 /* Slab_pad_check fixes things up after itself */
671 slab_pad_check(s, page);
672 return 1;
676 * Determine if a certain object on a page is on the freelist. Must hold the
677 * slab lock to guarantee that the chains are in a consistent state.
679 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
681 int nr = 0;
682 void *fp = page->freelist;
683 void *object = NULL;
685 while (fp && nr <= s->objects) {
686 if (fp == search)
687 return 1;
688 if (!check_valid_pointer(s, page, fp)) {
689 if (object) {
690 object_err(s, page, object,
691 "Freechain corrupt");
692 set_freepointer(s, object, NULL);
693 break;
694 } else {
695 slab_err(s, page, "Freepointer 0x%p corrupt",
696 fp);
697 page->freelist = NULL;
698 page->inuse = s->objects;
699 printk(KERN_ERR "@@@ SLUB %s: Freelist "
700 "cleared. Slab 0x%p\n",
701 s->name, page);
702 return 0;
704 break;
706 object = fp;
707 fp = get_freepointer(s, object);
708 nr++;
711 if (page->inuse != s->objects - nr) {
712 slab_err(s, page, "Wrong object count. Counter is %d but "
713 "counted were %d", page->inuse,
714 s->objects - nr);
715 page->inuse = s->objects - nr;
716 printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
717 "Slab @0x%p\n", s->name, page);
719 return search == NULL;
723 * Tracking of fully allocated slabs for debugging purposes.
725 static void add_full(struct kmem_cache_node *n, struct page *page)
727 spin_lock(&n->list_lock);
728 list_add(&page->lru, &n->full);
729 spin_unlock(&n->list_lock);
732 static void remove_full(struct kmem_cache *s, struct page *page)
734 struct kmem_cache_node *n;
736 if (!(s->flags & SLAB_STORE_USER))
737 return;
739 n = get_node(s, page_to_nid(page));
741 spin_lock(&n->list_lock);
742 list_del(&page->lru);
743 spin_unlock(&n->list_lock);
746 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
747 void *object)
749 if (!check_slab(s, page))
750 goto bad;
752 if (object && !on_freelist(s, page, object)) {
753 slab_err(s, page, "Object 0x%p already allocated", object);
754 goto bad;
757 if (!check_valid_pointer(s, page, object)) {
758 object_err(s, page, object, "Freelist Pointer check fails");
759 goto bad;
762 if (!object)
763 return 1;
765 if (!check_object(s, page, object, 0))
766 goto bad;
768 return 1;
769 bad:
770 if (PageSlab(page)) {
772 * If this is a slab page then lets do the best we can
773 * to avoid issues in the future. Marking all objects
774 * as used avoids touching the remaining objects.
776 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
777 s->name, page);
778 page->inuse = s->objects;
779 page->freelist = NULL;
780 /* Fix up fields that may be corrupted */
781 page->offset = s->offset / sizeof(void *);
783 return 0;
786 static int free_object_checks(struct kmem_cache *s, struct page *page,
787 void *object)
789 if (!check_slab(s, page))
790 goto fail;
792 if (!check_valid_pointer(s, page, object)) {
793 slab_err(s, page, "Invalid object pointer 0x%p", object);
794 goto fail;
797 if (on_freelist(s, page, object)) {
798 slab_err(s, page, "Object 0x%p already free", object);
799 goto fail;
802 if (!check_object(s, page, object, 1))
803 return 0;
805 if (unlikely(s != page->slab)) {
806 if (!PageSlab(page))
807 slab_err(s, page, "Attempt to free object(0x%p) "
808 "outside of slab", object);
809 else
810 if (!page->slab) {
811 printk(KERN_ERR
812 "SLUB <none>: no slab for object 0x%p.\n",
813 object);
814 dump_stack();
816 else
817 slab_err(s, page, "object at 0x%p belongs "
818 "to slab %s", object, page->slab->name);
819 goto fail;
821 return 1;
822 fail:
823 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
824 s->name, page, object);
825 return 0;
828 static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
830 if (s->flags & SLAB_TRACE) {
831 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
832 s->name,
833 alloc ? "alloc" : "free",
834 object, page->inuse,
835 page->freelist);
837 if (!alloc)
838 print_section("Object", (void *)object, s->objsize);
840 dump_stack();
844 static int __init setup_slub_debug(char *str)
846 if (!str || *str != '=')
847 slub_debug = DEBUG_DEFAULT_FLAGS;
848 else {
849 str++;
850 if (*str == 0 || *str == ',')
851 slub_debug = DEBUG_DEFAULT_FLAGS;
852 else
853 for (; *str && *str != ','; str++)
854 switch (*str) {
855 case 'f' : case 'F' :
856 slub_debug |= SLAB_DEBUG_FREE;
857 break;
858 case 'z' : case 'Z' :
859 slub_debug |= SLAB_RED_ZONE;
860 break;
861 case 'p' : case 'P' :
862 slub_debug |= SLAB_POISON;
863 break;
864 case 'u' : case 'U' :
865 slub_debug |= SLAB_STORE_USER;
866 break;
867 case 't' : case 'T' :
868 slub_debug |= SLAB_TRACE;
869 break;
870 default:
871 printk(KERN_ERR "slub_debug option '%c' "
872 "unknown. skipped\n",*str);
876 if (*str == ',')
877 slub_debug_slabs = str + 1;
878 return 1;
881 __setup("slub_debug", setup_slub_debug);
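
/*
 * Illustrative command line examples for the parser above:
 *
 *	slub_debug			enable DEBUG_DEFAULT_FLAGS on all slabs
 *	slub_debug=FZ			sanity checks (F) and red zoning (Z) only
 *	slub_debug=,dentry		default flags, but only for caches whose
 *					name starts with "dentry"
 *	slub_debug=U,kmalloc-64		user tracking for the kmalloc-64 cache
 */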
883 static void kmem_cache_open_debug_check(struct kmem_cache *s)
886 * The page->offset field is only 16 bit wide. This is an offset
887 * in units of words from the beginning of an object. If the slab
888 * size is bigger than that limit then we cannot move the free pointer
889 * behind the object anymore.
891 * On 32 bit platforms the limit is 256k. On 64bit platforms
892 * the limit is 512k.
894 * Debugging or ctor may create a need to move the free
895 * pointer. Fail if this happens.
897 if (s->size >= 65535 * sizeof(void *)) {
898 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
899 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
900 BUG_ON(s->ctor);
902 else
904 * Enable debugging if selected on the kernel commandline.
906 if (slub_debug && (!slub_debug_slabs ||
907 strncmp(slub_debug_slabs, s->name,
908 strlen(slub_debug_slabs)) == 0))
909 s->flags |= slub_debug;
911 #else
913 static inline int alloc_object_checks(struct kmem_cache *s,
914 struct page *page, void *object) { return 0; }
916 static inline int free_object_checks(struct kmem_cache *s,
917 struct page *page, void *object) { return 0; }
919 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
920 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
921 static inline void trace(struct kmem_cache *s, struct page *page,
922 void *object, int alloc) {}
923 static inline void init_object(struct kmem_cache *s,
924 void *object, int active) {}
925 static inline void init_tracking(struct kmem_cache *s, void *object) {}
926 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
927 { return 1; }
928 static inline int check_object(struct kmem_cache *s, struct page *page,
929 void *object, int active) { return 1; }
930 static inline void set_track(struct kmem_cache *s, void *object,
931 enum track_item alloc, void *addr) {}
932 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
933 #define slub_debug 0
934 #endif
936 * Slab allocation and freeing
938 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
940 struct page *page;
941 int pages = 1 << s->order;
943 if (s->order)
944 flags |= __GFP_COMP;
946 if (s->flags & SLAB_CACHE_DMA)
947 flags |= SLUB_DMA;
949 if (node == -1)
950 page = alloc_pages(flags, s->order);
951 else
952 page = alloc_pages_node(node, flags, s->order);
954 if (!page)
955 return NULL;
957 mod_zone_page_state(page_zone(page),
958 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
959 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
960 pages);
962 return page;
965 static void setup_object(struct kmem_cache *s, struct page *page,
966 void *object)
968 if (SlabDebug(page)) {
969 init_object(s, object, 0);
970 init_tracking(s, object);
973 if (unlikely(s->ctor))
974 s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
977 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
979 struct page *page;
980 struct kmem_cache_node *n;
981 void *start;
982 void *end;
983 void *last;
984 void *p;
986 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
988 if (flags & __GFP_WAIT)
989 local_irq_enable();
991 page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
992 if (!page)
993 goto out;
995 n = get_node(s, page_to_nid(page));
996 if (n)
997 atomic_long_inc(&n->nr_slabs);
998 page->offset = s->offset / sizeof(void *);
999 page->slab = s;
1000 page->flags |= 1 << PG_slab;
1001 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1002 SLAB_STORE_USER | SLAB_TRACE))
1003 SetSlabDebug(page);
1005 start = page_address(page);
1006 end = start + s->objects * s->size;
1008 if (unlikely(s->flags & SLAB_POISON))
1009 memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1011 last = start;
1012 for_each_object(p, s, start) {
1013 setup_object(s, page, last);
1014 set_freepointer(s, last, p);
1015 last = p;
1017 setup_object(s, page, last);
1018 set_freepointer(s, last, NULL);
1020 page->freelist = start;
1021 page->lockless_freelist = NULL;
1022 page->inuse = 0;
1023 out:
1024 if (flags & __GFP_WAIT)
1025 local_irq_disable();
1026 return page;
1029 static void __free_slab(struct kmem_cache *s, struct page *page)
1031 int pages = 1 << s->order;
1033 if (unlikely(SlabDebug(page))) {
1034 void *p;
1036 slab_pad_check(s, page);
1037 for_each_object(p, s, page_address(page))
1038 check_object(s, page, p, 0);
1041 mod_zone_page_state(page_zone(page),
1042 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1043 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1044 - pages);
1046 page->mapping = NULL;
1047 __free_pages(page, s->order);
1050 static void rcu_free_slab(struct rcu_head *h)
1052 struct page *page;
1054 page = container_of((struct list_head *)h, struct page, lru);
1055 __free_slab(page->slab, page);
1058 static void free_slab(struct kmem_cache *s, struct page *page)
1060 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1062 * RCU free overloads the RCU head over the LRU
1064 struct rcu_head *head = (void *)&page->lru;
1066 call_rcu(head, rcu_free_slab);
1067 } else
1068 __free_slab(s, page);
1071 static void discard_slab(struct kmem_cache *s, struct page *page)
1073 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1075 atomic_long_dec(&n->nr_slabs);
1076 reset_page_mapcount(page);
1077 ClearSlabDebug(page);
1078 __ClearPageSlab(page);
1079 free_slab(s, page);
1083 * Per slab locking using the pagelock
1085 static __always_inline void slab_lock(struct page *page)
1087 bit_spin_lock(PG_locked, &page->flags);
1090 static __always_inline void slab_unlock(struct page *page)
1092 bit_spin_unlock(PG_locked, &page->flags);
1095 static __always_inline int slab_trylock(struct page *page)
1097 int rc = 1;
1099 rc = bit_spin_trylock(PG_locked, &page->flags);
1100 return rc;
1104 * Management of partially allocated slabs
1106 static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
1108 spin_lock(&n->list_lock);
1109 n->nr_partial++;
1110 list_add_tail(&page->lru, &n->partial);
1111 spin_unlock(&n->list_lock);
1114 static void add_partial(struct kmem_cache_node *n, struct page *page)
1116 spin_lock(&n->list_lock);
1117 n->nr_partial++;
1118 list_add(&page->lru, &n->partial);
1119 spin_unlock(&n->list_lock);
1122 static void remove_partial(struct kmem_cache *s,
1123 struct page *page)
1125 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1127 spin_lock(&n->list_lock);
1128 list_del(&page->lru);
1129 n->nr_partial--;
1130 spin_unlock(&n->list_lock);
1134 * Lock slab and remove from the partial list.
1136 * Must hold list_lock.
1138 static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
1140 if (slab_trylock(page)) {
1141 list_del(&page->lru);
1142 n->nr_partial--;
1143 return 1;
1145 return 0;
1149 * Try to allocate a partial slab from a specific node.
1151 static struct page *get_partial_node(struct kmem_cache_node *n)
1153 struct page *page;
1156 * Racy check. If we mistakenly see no partial slabs then we
1157 * just allocate an empty slab. If we mistakenly try to get a
1158 * partial slab and there is none available then get_partial_node()
1159 * will return NULL.
1161 if (!n || !n->nr_partial)
1162 return NULL;
1164 spin_lock(&n->list_lock);
1165 list_for_each_entry(page, &n->partial, lru)
1166 if (lock_and_del_slab(n, page))
1167 goto out;
1168 page = NULL;
1169 out:
1170 spin_unlock(&n->list_lock);
1171 return page;
1175 * Get a page from somewhere. Search in increasing NUMA distances.
1177 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1179 #ifdef CONFIG_NUMA
1180 struct zonelist *zonelist;
1181 struct zone **z;
1182 struct page *page;
1185 * The defrag ratio allows a configuration of the tradeoffs between
1186 * inter node defragmentation and node local allocations. A lower
1187 * defrag_ratio increases the tendency to do local allocations
1188 * instead of attempting to obtain partial slabs from other nodes.
1190 * If the defrag_ratio is set to 0 then kmalloc() always
1191 * returns node local objects. If the ratio is higher then kmalloc()
1192 * may return off node objects because partial slabs are obtained
1193 * from other nodes and filled up.
1195 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
1196 * defrag_ratio = 1000) then every (well almost) allocation will
1197 * first attempt to defrag slab caches on other nodes. This means
1198 * scanning over all nodes to look for partial slabs which may be
1199 * expensive if we do it every time we are trying to find a slab
1200 * with available objects.
1202 if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
1203 return NULL;
1205 zonelist = &NODE_DATA(slab_node(current->mempolicy))
1206 ->node_zonelists[gfp_zone(flags)];
1207 for (z = zonelist->zones; *z; z++) {
1208 struct kmem_cache_node *n;
1210 n = get_node(s, zone_to_nid(*z));
1212 if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1213 n->nr_partial > MIN_PARTIAL) {
1214 page = get_partial_node(n);
1215 if (page)
1216 return page;
1219 #endif
1220 return NULL;
1224 * Get a partial page, lock it and return it.
1226 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1228 struct page *page;
1229 int searchnode = (node == -1) ? numa_node_id() : node;
1231 page = get_partial_node(get_node(s, searchnode));
1232 if (page || (flags & __GFP_THISNODE))
1233 return page;
1235 return get_any_partial(s, flags);
1239 * Move a page back to the lists.
1241 * Must be called with the slab lock held.
1243 * On exit the slab lock will have been dropped.
1245 static void putback_slab(struct kmem_cache *s, struct page *page)
1247 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1249 if (page->inuse) {
1251 if (page->freelist)
1252 add_partial(n, page);
1253 else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1254 add_full(n, page);
1255 slab_unlock(page);
1257 } else {
1258 if (n->nr_partial < MIN_PARTIAL) {
1260 * Adding an empty slab to the partial slabs in order
1261 * to avoid page allocator overhead. This slab needs
1262 * to come after the other slabs with objects in
1263 * order to fill them up. That way the size of the
1264 * partial list stays small. kmem_cache_shrink can
1265 * reclaim empty slabs from the partial list.
1267 add_partial_tail(n, page);
1268 slab_unlock(page);
1269 } else {
1270 slab_unlock(page);
1271 discard_slab(s, page);
1277 * Remove the cpu slab
1279 static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
1282 * Merge cpu freelist into freelist. Typically we get here
1283 * because both freelists are empty. So this is unlikely
1284 * to occur.
1286 while (unlikely(page->lockless_freelist)) {
1287 void **object;
1289 /* Retrieve object from cpu_freelist */
1290 object = page->lockless_freelist;
1291 page->lockless_freelist = page->lockless_freelist[page->offset];
1293 /* And put onto the regular freelist */
1294 object[page->offset] = page->freelist;
1295 page->freelist = object;
1296 page->inuse--;
1298 s->cpu_slab[cpu] = NULL;
1299 ClearPageActive(page);
1301 putback_slab(s, page);
1304 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
1306 slab_lock(page);
1307 deactivate_slab(s, page, cpu);
1311 * Flush cpu slab.
1312 * Called from IPI handler with interrupts disabled.
1314 static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1316 struct page *page = s->cpu_slab[cpu];
1318 if (likely(page))
1319 flush_slab(s, page, cpu);
1322 static void flush_cpu_slab(void *d)
1324 struct kmem_cache *s = d;
1325 int cpu = smp_processor_id();
1327 __flush_cpu_slab(s, cpu);
1330 static void flush_all(struct kmem_cache *s)
1332 #ifdef CONFIG_SMP
1333 on_each_cpu(flush_cpu_slab, s, 1, 1);
1334 #else
1335 unsigned long flags;
1337 local_irq_save(flags);
1338 flush_cpu_slab(s);
1339 local_irq_restore(flags);
1340 #endif
1344 * Slow path. The lockless freelist is empty or we need to perform
1345 * debugging duties.
1347 * Interrupts are disabled.
1349 * Processing is still very fast if new objects have been freed to the
1350 * regular freelist. In that case we simply take over the regular freelist
1351 * as the lockless freelist and zap the regular freelist.
1353 * If that is not working then we fall back to the partial lists. We take the
1354 * first element of the freelist as the object to allocate now and move the
1355 * rest of the freelist to the lockless freelist.
1357 * And if we were unable to get a new slab from the partial slab lists then
1358 * we need to allocate a new slab. This is the slowest path since we may sleep.
1360 static void *__slab_alloc(struct kmem_cache *s,
1361 gfp_t gfpflags, int node, void *addr, struct page *page)
1363 void **object;
1364 int cpu = smp_processor_id();
1366 if (!page)
1367 goto new_slab;
1369 slab_lock(page);
1370 if (unlikely(node != -1 && page_to_nid(page) != node))
1371 goto another_slab;
1372 load_freelist:
1373 object = page->freelist;
1374 if (unlikely(!object))
1375 goto another_slab;
1376 if (unlikely(SlabDebug(page)))
1377 goto debug;
1379 object = page->freelist;
1380 page->lockless_freelist = object[page->offset];
1381 page->inuse = s->objects;
1382 page->freelist = NULL;
1383 slab_unlock(page);
1384 return object;
1386 another_slab:
1387 deactivate_slab(s, page, cpu);
1389 new_slab:
1390 page = get_partial(s, gfpflags, node);
1391 if (page) {
1392 have_slab:
1393 s->cpu_slab[cpu] = page;
1394 SetPageActive(page);
1395 goto load_freelist;
1398 page = new_slab(s, gfpflags, node);
1399 if (page) {
1400 cpu = smp_processor_id();
1401 if (s->cpu_slab[cpu]) {
1403 * Someone else populated the cpu_slab while we
1404 * enabled interrupts, or we have gotten scheduled
1405 * on another cpu. The page may not be on the
1406 * requested node even if __GFP_THISNODE was
1407 * specified. So we need to recheck.
1409 if (node == -1 ||
1410 page_to_nid(s->cpu_slab[cpu]) == node) {
1412 * Current cpuslab is acceptable and we
1413 * want the current one since it is cache hot
1415 discard_slab(s, page);
1416 page = s->cpu_slab[cpu];
1417 slab_lock(page);
1418 goto load_freelist;
1420 /* New slab does not fit our expectations */
1421 flush_slab(s, s->cpu_slab[cpu], cpu);
1423 slab_lock(page);
1424 goto have_slab;
1426 return NULL;
1427 debug:
1428 object = page->freelist;
1429 if (!alloc_object_checks(s, page, object))
1430 goto another_slab;
1431 if (s->flags & SLAB_STORE_USER)
1432 set_track(s, object, TRACK_ALLOC, addr);
1433 trace(s, page, object, 1);
1434 init_object(s, object, 1);
1436 page->inuse++;
1437 page->freelist = object[page->offset];
1438 slab_unlock(page);
1439 return object;
1443 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1444 * have the fastpath folded into their functions. So no function call
1445 * overhead for requests that can be satisfied on the fastpath.
1447 * The fastpath works by first checking if the lockless freelist can be used.
1448 * If not then __slab_alloc is called for slow processing.
1450 * Otherwise we can simply pick the next object from the lockless free list.
1452 static void __always_inline *slab_alloc(struct kmem_cache *s,
1453 gfp_t gfpflags, int node, void *addr)
1455 struct page *page;
1456 void **object;
1457 unsigned long flags;
1459 local_irq_save(flags);
1460 page = s->cpu_slab[smp_processor_id()];
1461 if (unlikely(!page || !page->lockless_freelist ||
1462 (node != -1 && page_to_nid(page) != node)))
1464 object = __slab_alloc(s, gfpflags, node, addr, page);
1466 else {
1467 object = page->lockless_freelist;
1468 page->lockless_freelist = object[page->offset];
1470 local_irq_restore(flags);
1471 return object;
1474 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1476 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1478 EXPORT_SYMBOL(kmem_cache_alloc);
1480 #ifdef CONFIG_NUMA
1481 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1483 return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1485 EXPORT_SYMBOL(kmem_cache_alloc_node);
1486 #endif
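
/*
 * Illustrative usage sketch (not part of slub.c): a typical alloc/free
 * round trip that exercises the fastpath above. The cache pointer is
 * assumed to come from kmem_cache_create() elsewhere; the function is
 * hypothetical.
 */
static void example_cache_user(struct kmem_cache *cachep)
{
	void *object = kmem_cache_alloc(cachep, GFP_KERNEL);

	if (object)
		kmem_cache_free(cachep, object);
}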
1489 * Slow path handling. This may still be called frequently since objects
1490 * have a longer lifetime than the cpu slabs in most processing loads.
1492 * So we still attempt to reduce cache line usage. Just take the slab
1493 * lock and free the item. If there is no additional partial page
1494 * handling required then we can return immediately.
1496 static void __slab_free(struct kmem_cache *s, struct page *page,
1497 void *x, void *addr)
1499 void *prior;
1500 void **object = (void *)x;
1502 slab_lock(page);
1504 if (unlikely(SlabDebug(page)))
1505 goto debug;
1506 checks_ok:
1507 prior = object[page->offset] = page->freelist;
1508 page->freelist = object;
1509 page->inuse--;
1511 if (unlikely(PageActive(page)))
1513 * Cpu slabs are never on partial lists and are
1514 * never freed.
1516 goto out_unlock;
1518 if (unlikely(!page->inuse))
1519 goto slab_empty;
1522 * Objects left in the slab. If it
1523 * was not on the partial list before
1524 * then add it.
1526 if (unlikely(!prior))
1527 add_partial(get_node(s, page_to_nid(page)), page);
1529 out_unlock:
1530 slab_unlock(page);
1531 return;
1533 slab_empty:
1534 if (prior)
1536 * Slab still on the partial list.
1538 remove_partial(s, page);
1540 slab_unlock(page);
1541 discard_slab(s, page);
1542 return;
1544 debug:
1545 if (!free_object_checks(s, page, x))
1546 goto out_unlock;
1547 if (!PageActive(page) && !page->freelist)
1548 remove_full(s, page);
1549 if (s->flags & SLAB_STORE_USER)
1550 set_track(s, x, TRACK_FREE, addr);
1551 trace(s, page, object, 0);
1552 init_object(s, object, 0);
1553 goto checks_ok;
1557 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1558 * can perform fastpath freeing without additional function calls.
1560 * The fastpath is only possible if we are freeing to the current cpu slab
1561 * of this processor. This is typically the case if we have just allocated
1562 * the item before.
1564 * If fastpath is not possible then fall back to __slab_free where we deal
1565 * with all sorts of special processing.
1567 static void __always_inline slab_free(struct kmem_cache *s,
1568 struct page *page, void *x, void *addr)
1570 void **object = (void *)x;
1571 unsigned long flags;
1573 local_irq_save(flags);
1574 if (likely(page == s->cpu_slab[smp_processor_id()] &&
1575 !SlabDebug(page))) {
1576 object[page->offset] = page->lockless_freelist;
1577 page->lockless_freelist = object;
1578 } else
1579 __slab_free(s, page, x, addr);
1581 local_irq_restore(flags);
1584 void kmem_cache_free(struct kmem_cache *s, void *x)
1586 struct page *page;
1588 page = virt_to_head_page(x);
1590 slab_free(s, page, x, __builtin_return_address(0));
1592 EXPORT_SYMBOL(kmem_cache_free);
1594 /* Figure out on which slab page the object resides */
1595 static struct page *get_object_page(const void *x)
1597 struct page *page = virt_to_head_page(x);
1599 if (!PageSlab(page))
1600 return NULL;
1602 return page;
1606 * Object placement in a slab is made very easy because we always start at
1607 * offset 0. If we tune the size of the object to the alignment then we can
1608 * get the required alignment by putting one properly sized object after
1609 * another.
1611 * Notice that the allocation order determines the sizes of the per cpu
1612 * caches. Each processor has always one slab available for allocations.
1613 * Increasing the allocation order reduces the number of times that slabs
1614 * must be moved on and off the partial lists and is therefore a factor in
1615 * locking overhead.
1619 * Minimum / Maximum order of slab pages. This influences locking overhead
1620 * and slab fragmentation. A higher order reduces the number of partial slabs
1621 * and increases the number of allocations possible without having to
1622 * take the list_lock.
1624 static int slub_min_order;
1625 static int slub_max_order = DEFAULT_MAX_ORDER;
1626 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1629 * Merge control. If this is set then no merging of slab caches will occur.
1630 * (Could be removed. This was introduced to pacify the merge skeptics.)
1632 static int slub_nomerge;
1635 * Calculate the order of allocation given a slab object size.
1637 * The order of allocation has significant impact on performance and other
1638 * system components. Generally order 0 allocations should be preferred since
1639 * order 0 does not cause fragmentation in the page allocator. Larger objects
1640 * can be problematic to put into order 0 slabs because there may be too much
1641 * unused space left. We go to a higher order if more than 1/8th of the slab
1642 * would be wasted.
1644 * In order to reach satisfactory performance we must ensure that a minimum
1645 * number of objects is in one slab. Otherwise we may generate too much
1646 * activity on the partial lists which requires taking the list_lock. This is
1647 * less a concern for large slabs though which are rarely used.
1649 * slub_max_order specifies the order where we begin to stop considering the
1650 * number of objects in a slab as critical. If we reach slub_max_order then
1651 * we try to keep the page order as low as possible. So we accept more waste
1652 * of space in favor of a small page order.
1654 * Higher order allocations also allow the placement of more objects in a
1655 * slab and thereby reduce object handling overhead. If the user has
1656 * requested a higher minimum order then we start with that one instead of
1657 * the smallest order which will fit the object.
1659 static inline int slab_order(int size, int min_objects,
1660 int max_order, int fract_leftover)
1662 int order;
1663 int rem;
1665 for (order = max(slub_min_order,
1666 fls(min_objects * size - 1) - PAGE_SHIFT);
1667 order <= max_order; order++) {
1669 unsigned long slab_size = PAGE_SIZE << order;
1671 if (slab_size < min_objects * size)
1672 continue;
1674 rem = slab_size % size;
1676 if (rem <= slab_size / fract_leftover)
1677 break;
1681 return order;
1684 static inline int calculate_order(int size)
1686 int order;
1687 int min_objects;
1688 int fraction;
1691 * Attempt to find best configuration for a slab. This
1692 * works by first attempting to generate a layout with
1693 * the best configuration and backing off gradually.
1695 * First we reduce the acceptable waste in a slab. Then
1696 * we reduce the minimum objects required in a slab.
1698 min_objects = slub_min_objects;
1699 while (min_objects > 1) {
1700 fraction = 8;
1701 while (fraction >= 4) {
1702 order = slab_order(size, min_objects,
1703 slub_max_order, fraction);
1704 if (order <= slub_max_order)
1705 return order;
1706 fraction /= 2;
1708 min_objects /= 2;
1712 * We were unable to place multiple objects in a slab. Now
1713 * let's see if we can place a single object there.
1715 order = slab_order(size, 1, slub_max_order, 1);
1716 if (order <= slub_max_order)
1717 return order;
1720 * Doh this slab cannot be placed using slub_max_order.
1722 order = slab_order(size, 1, MAX_ORDER, 1);
1723 if (order <= MAX_ORDER)
1724 return order;
1725 return -ENOSYS;
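
/*
 * Worked example (illustrative, 4K pages with the defaults above of
 * order <= 1 and at least 4 objects): for size = 192, slab_order()
 * starts at order 0. A 4096 byte slab holds 21 objects with 64 bytes
 * left over, and 64 <= 4096 / 8, so order 0 is accepted on the first
 * pass with fraction = 8.
 */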
1729 * Figure out what the alignment of the objects will be.
1731 static unsigned long calculate_alignment(unsigned long flags,
1732 unsigned long align, unsigned long size)
1735 * If the user wants hardware cache aligned objects then
1736 * follow that suggestion if the object is sufficiently
1737 * large.
1739 * The hardware cache alignment cannot override the
1740 * specified alignment though. If that is greater,
1741 * we use it instead.
1743 if ((flags & SLAB_HWCACHE_ALIGN) &&
1744 size > cache_line_size() / 2)
1745 return max_t(unsigned long, align, cache_line_size());
1747 if (align < ARCH_SLAB_MINALIGN)
1748 return ARCH_SLAB_MINALIGN;
1750 return ALIGN(align, sizeof(void *));
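
/*
 * Worked example (illustrative, 64 byte cache lines): a cache created
 * with SLAB_HWCACHE_ALIGN and a 100 byte object is aligned to 64 since
 * 100 > 64 / 2. A 24 byte object with the same flag falls through to
 * word alignment because cache aligning such small objects would waste
 * most of the cache line.
 */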
1753 static void init_kmem_cache_node(struct kmem_cache_node *n)
1755 n->nr_partial = 0;
1756 atomic_long_set(&n->nr_slabs, 0);
1757 spin_lock_init(&n->list_lock);
1758 INIT_LIST_HEAD(&n->partial);
1759 INIT_LIST_HEAD(&n->full);
1762 #ifdef CONFIG_NUMA
1764 * No kmalloc_node yet so do it by hand. We know that this is the first
1765 * slab on the node for this slabcache. There are no concurrent accesses
1766 * possible.
1768 * Note that this function only works on the kmalloc_node_cache
1769 * when allocating for the kmalloc_node_cache.
1771 static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags,
1772 int node)
1774 struct page *page;
1775 struct kmem_cache_node *n;
1777 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
1779 page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
1780 /* new_slab() returns with interrupts disabled */
1781 local_irq_enable();
1783 BUG_ON(!page);
1784 n = page->freelist;
1785 BUG_ON(!n);
1786 page->freelist = get_freepointer(kmalloc_caches, n);
1787 page->inuse++;
1788 kmalloc_caches->node[node] = n;
1789 init_object(kmalloc_caches, n, 1);
1790 init_kmem_cache_node(n);
1791 atomic_long_inc(&n->nr_slabs);
1792 add_partial(n, page);
1793 return n;
1796 static void free_kmem_cache_nodes(struct kmem_cache *s)
1798 int node;
1800 for_each_online_node(node) {
1801 struct kmem_cache_node *n = s->node[node];
1802 if (n && n != &s->local_node)
1803 kmem_cache_free(kmalloc_caches, n);
1804 s->node[node] = NULL;
1808 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1810 int node;
1811 int local_node;
1813 if (slab_state >= UP)
1814 local_node = page_to_nid(virt_to_page(s));
1815 else
1816 local_node = 0;
1818 for_each_online_node(node) {
1819 struct kmem_cache_node *n;
1821 if (local_node == node)
1822 n = &s->local_node;
1823 else {
1824 if (slab_state == DOWN) {
1825 n = early_kmem_cache_node_alloc(gfpflags,
1826 node);
1827 continue;
1829 n = kmem_cache_alloc_node(kmalloc_caches,
1830 gfpflags, node);
1832 if (!n) {
1833 free_kmem_cache_nodes(s);
1834 return 0;
1838 s->node[node] = n;
1839 init_kmem_cache_node(n);
1841 return 1;
1843 #else
1844 static void free_kmem_cache_nodes(struct kmem_cache *s)
1848 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1850 init_kmem_cache_node(&s->local_node);
1851 return 1;
1853 #endif
1856 * calculate_sizes() determines the order and the distribution of data within
1857 * a slab object.
1859 static int calculate_sizes(struct kmem_cache *s)
1861 unsigned long flags = s->flags;
1862 unsigned long size = s->objsize;
1863 unsigned long align = s->align;
1866 * Determine if we can poison the object itself. If the user of
1867 * the slab may touch the object after free or before allocation
1868 * then we should never poison the object itself.
1870 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
1871 !s->ctor)
1872 s->flags |= __OBJECT_POISON;
1873 else
1874 s->flags &= ~__OBJECT_POISON;
1877 * Round up object size to the next word boundary. We can only
1878 * place the free pointer at word boundaries and this determines
1879 * the possible location of the free pointer.
1881 size = ALIGN(size, sizeof(void *));
1883 #ifdef CONFIG_SLUB_DEBUG
1885 * If we are Redzoning then check if there is some space between the
1886 * end of the object and the free pointer. If not then add an
1887 * additional word to have some bytes to store Redzone information.
1889 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
1890 size += sizeof(void *);
1891 #endif
1894 * With that we have determined the number of bytes in actual use
1895 * by the object. This is the potential offset to the free pointer.
1897 s->inuse = size;
1899 #ifdef CONFIG_SLUB_DEBUG
1900 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
1901 s->ctor)) {
1903 * Relocate free pointer after the object if it is not
1904 * permitted to overwrite the first word of the object on
1905 * kmem_cache_free.
1907 * This is the case if we do RCU, have a constructor or
1908 * destructor or are poisoning the objects.
1910 s->offset = size;
1911 size += sizeof(void *);
1914 if (flags & SLAB_STORE_USER)
1916 * Need to store information about allocs and frees after
1917 * the object.
1919 size += 2 * sizeof(struct track);
1921 if (flags & SLAB_RED_ZONE)
1923 * Add some empty padding so that we can catch
1924 * overwrites from earlier objects rather than let
1925 * tracking information or the free pointer be
1926 * corrupted if a user writes before the start
1927 * of the object.
1929 size += sizeof(void *);
1930 #endif
1933 * Determine the alignment based on various parameters that the
1934 * user specified and the dynamic determination of cache line size
1935 * on bootup.
1937 align = calculate_alignment(flags, align, s->objsize);
1940 * SLUB stores one object immediately after another beginning from
1941 * offset 0. In order to align the objects we have to simply size
1942 * each object to conform to the alignment.
1944 size = ALIGN(size, align);
1945 s->size = size;
1947 s->order = calculate_order(size);
1948 if (s->order < 0)
1949 return 0;
1952 * Determine the number of objects per slab
1954 s->objects = (PAGE_SIZE << s->order) / size;
1957 * Verify that the number of objects is within permitted limits.
1958 * The page->inuse field is only 16 bit wide! So we cannot have
1959 * more than 64k objects per slab.
1961 if (!s->objects || s->objects > 65535)
1962 return 0;
1963 return 1;
1967 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1968 const char *name, size_t size,
1969 size_t align, unsigned long flags,
1970 void (*ctor)(void *, struct kmem_cache *, unsigned long))
1972 memset(s, 0, kmem_size);
1973 s->name = name;
1974 s->ctor = ctor;
1975 s->objsize = size;
1976 s->flags = flags;
1977 s->align = align;
1978 kmem_cache_open_debug_check(s);
1980 if (!calculate_sizes(s))
1981 goto error;
1983 s->refcount = 1;
1984 #ifdef CONFIG_NUMA
1985 s->defrag_ratio = 100;
1986 #endif
1988 if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
1989 return 1;
1990 error:
1991 if (flags & SLAB_PANIC)
1992 panic("Cannot create slab %s size=%lu realsize=%u "
1993 "order=%u offset=%u flags=%lx\n",
1994 s->name, (unsigned long)size, s->size, s->order,
1995 s->offset, flags);
1996 return 0;
1998 EXPORT_SYMBOL(kmem_cache_open);
2001 * Check if a given pointer is valid
2003 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2005 struct page *page;
2007 page = get_object_page(object);
2009 if (!page || s != page->slab)
2010 /* No slab or wrong slab */
2011 return 0;
2013 if (!check_valid_pointer(s, page, object))
2014 return 0;
2017 * We could also check if the object is on the slabs freelist.
2018 * But this would be too expensive and it seems that the main
2019 * purpose of kmem_ptr_validate is to check if the object belongs
2020 * to a certain slab.
2022 return 1;
2024 EXPORT_SYMBOL(kmem_ptr_validate);
2027 * Determine the size of a slab object
2029 unsigned int kmem_cache_size(struct kmem_cache *s)
2031 return s->objsize;
2033 EXPORT_SYMBOL(kmem_cache_size);
2035 const char *kmem_cache_name(struct kmem_cache *s)
2037 return s->name;
2039 EXPORT_SYMBOL(kmem_cache_name);
2042 * Attempt to free all slabs on a node. Return the number of slabs we
2043 * were unable to free.
2045 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2046 struct list_head *list)
2048 int slabs_inuse = 0;
2049 unsigned long flags;
2050 struct page *page, *h;
2052 spin_lock_irqsave(&n->list_lock, flags);
2053 list_for_each_entry_safe(page, h, list, lru)
2054 if (!page->inuse) {
2055 list_del(&page->lru);
2056 discard_slab(s, page);
2057 } else
2058 slabs_inuse++;
2059 spin_unlock_irqrestore(&n->list_lock, flags);
2060 return slabs_inuse;
2064 * Release all resources used by a slab cache.
2066 static int kmem_cache_close(struct kmem_cache *s)
2068 int node;
2070 flush_all(s);
2072 /* Attempt to free all objects */
2073 for_each_online_node(node) {
2074 struct kmem_cache_node *n = get_node(s, node);
2076 n->nr_partial -= free_list(s, n, &n->partial);
2077 if (atomic_long_read(&n->nr_slabs))
2078 return 1;
2080 free_kmem_cache_nodes(s);
2081 return 0;
2085 * Close a cache and release the kmem_cache structure
2086 * (must be used for caches created using kmem_cache_create)
2088 void kmem_cache_destroy(struct kmem_cache *s)
2090 down_write(&slub_lock);
2091 s->refcount--;
2092 if (!s->refcount) {
2093 list_del(&s->list);
2094 if (kmem_cache_close(s))
2095 WARN_ON(1);
2096 sysfs_slab_remove(s);
2097 kfree(s);
2099 up_write(&slub_lock);
2101 EXPORT_SYMBOL(kmem_cache_destroy);
2103 /********************************************************************
2104 * Kmalloc subsystem
2105 *******************************************************************/
2107 struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
2108 EXPORT_SYMBOL(kmalloc_caches);
2110 #ifdef CONFIG_ZONE_DMA
2111 static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
2112 #endif
2114 static int __init setup_slub_min_order(char *str)
2116 get_option (&str, &slub_min_order);
2118 return 1;
2121 __setup("slub_min_order=", setup_slub_min_order);
2123 static int __init setup_slub_max_order(char *str)
2125 get_option (&str, &slub_max_order);
2127 return 1;
2130 __setup("slub_max_order=", setup_slub_max_order);
2132 static int __init setup_slub_min_objects(char *str)
2134 get_option (&str, &slub_min_objects);
2136 return 1;
2139 __setup("slub_min_objects=", setup_slub_min_objects);
2141 static int __init setup_slub_nomerge(char *str)
2143 slub_nomerge = 1;
2144 return 1;
2147 __setup("slub_nomerge", setup_slub_nomerge);
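/*
 * These knobs are set on the kernel command line. Example (values are
 * illustrative):
 *
 *	slub_min_order=1 slub_max_order=3 slub_min_objects=16 slub_nomerge
 *
 * would require at least order-1 slabs holding at least 16 objects each
 * and disable the merging of compatible slab caches.
 */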
2149 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2150 const char *name, int size, gfp_t gfp_flags)
2152 unsigned int flags = 0;
2154 if (gfp_flags & SLUB_DMA)
2155 flags = SLAB_CACHE_DMA;
2157 down_write(&slub_lock);
2158 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2159 flags, NULL))
2160 goto panic;
2162 list_add(&s->list, &slab_caches);
2163 up_write(&slub_lock);
2164 if (sysfs_slab_add(s))
2165 goto panic;
2166 return s;
2168 panic:
2169 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2172 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2174 int index = kmalloc_index(size);
2176 if (!index)
2177 return NULL;
	/* kmalloc_index() returns -1 if the allocation is too large */
2180 BUG_ON(index < 0);
2182 #ifdef CONFIG_ZONE_DMA
2183 if ((flags & SLUB_DMA)) {
2184 struct kmem_cache *s;
2185 struct kmem_cache *x;
2186 char *text;
2187 size_t realsize;
2189 s = kmalloc_caches_dma[index];
2190 if (s)
2191 return s;
2193 /* Dynamically create dma cache */
2194 x = kmalloc(kmem_size, flags & ~SLUB_DMA);
2195 if (!x)
2196 panic("Unable to allocate memory for dma cache\n");
		/*
		 * Indices 1 and 2 are the odd-sized kmalloc-96 and
		 * kmalloc-192 caches; all other indices are powers of two.
		 */
		if (index == 1)
			realsize = 96;
		else if (index == 2)
			realsize = 192;
		else
			realsize = 1 << index;
2207 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2208 (unsigned int)realsize);
2209 s = create_kmalloc_cache(x, text, realsize, flags);
2210 kmalloc_caches_dma[index] = s;
2211 return s;
2213 #endif
2214 return &kmalloc_caches[index];
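/*
 * Example of the mapping performed by get_slab() (assuming the standard
 * kmalloc_index() table and the cache setup done in kmem_cache_init()
 * below): 96 and 192 byte requests go to the odd-sized caches at
 * indices 1 and 2, all other sizes round up to the next power of two:
 *
 *	get_slab(100, GFP_KERNEL)		-> &kmalloc_caches[7]
 *						   ("kmalloc-128")
 *	get_slab(100, GFP_KERNEL | __GFP_DMA)	-> kmalloc_caches_dma[7]
 */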
2217 void *__kmalloc(size_t size, gfp_t flags)
2219 struct kmem_cache *s = get_slab(size, flags);
2221 if (s)
2222 return slab_alloc(s, flags, -1, __builtin_return_address(0));
2223 return NULL;
2225 EXPORT_SYMBOL(__kmalloc);
2227 #ifdef CONFIG_NUMA
2228 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2230 struct kmem_cache *s = get_slab(size, flags);
2232 if (s)
2233 return slab_alloc(s, flags, node, __builtin_return_address(0));
2234 return NULL;
2236 EXPORT_SYMBOL(__kmalloc_node);
2237 #endif
2239 size_t ksize(const void *object)
2241 struct page *page = get_object_page(object);
2242 struct kmem_cache *s;
2244 BUG_ON(!page);
2245 s = page->slab;
2246 BUG_ON(!s);
2249 * Debugging requires use of the padding between object
2250 * and whatever may come after it.
2252 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2253 return s->objsize;
 * If we need to store the freelist pointer back there or track
 * user information then we can only use the space before that
 * information.
2260 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2261 return s->inuse;
2264 * Else we can use all the padding etc for the allocation
2266 return s->size;
2268 EXPORT_SYMBOL(ksize);
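/*
 * Example (sizes illustrative, no debug flags assumed): kmalloc() rounds
 * a request up to the next cache size and ksize() reports how much of
 * that the caller may actually use:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	size_t n = ksize(p);	(typically 128 here, from kmalloc-128)
 */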
2270 void kfree(const void *x)
2272 struct kmem_cache *s;
2273 struct page *page;
2275 if (!x)
2276 return;
2278 page = virt_to_head_page(x);
2279 s = page->slab;
2281 slab_free(s, page, (void *)x, __builtin_return_address(0));
2283 EXPORT_SYMBOL(kfree);
2286 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2287 * the remaining slabs by the number of items in use. The slabs with the
2288 * most items in use come first. New allocations will then fill those up
2289 * and thus they can be removed from the partial lists.
 * The slabs with the fewest objects in use are placed last. They are
 * then allocated from only after the fuller slabs, which increases the
 * chance that their remaining objects get freed and the slabs can be
 * discarded entirely.
2295 int kmem_cache_shrink(struct kmem_cache *s)
2297 int node;
2298 int i;
2299 struct kmem_cache_node *n;
2300 struct page *page;
2301 struct page *t;
2302 struct list_head *slabs_by_inuse =
2303 kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2304 unsigned long flags;
2306 if (!slabs_by_inuse)
2307 return -ENOMEM;
2309 flush_all(s);
2310 for_each_online_node(node) {
2311 n = get_node(s, node);
2313 if (!n->nr_partial)
2314 continue;
2316 for (i = 0; i < s->objects; i++)
2317 INIT_LIST_HEAD(slabs_by_inuse + i);
2319 spin_lock_irqsave(&n->list_lock, flags);
2322 * Build lists indexed by the items in use in each slab.
2324 * Note that concurrent frees may occur while we hold the
2325 * list_lock. page->inuse here is the upper limit.
2327 list_for_each_entry_safe(page, t, &n->partial, lru) {
2328 if (!page->inuse && slab_trylock(page)) {
2330 * Must hold slab lock here because slab_free
2331 * may have freed the last object and be
2332 * waiting to release the slab.
2334 list_del(&page->lru);
2335 n->nr_partial--;
2336 slab_unlock(page);
2337 discard_slab(s, page);
2338 } else {
2339 if (n->nr_partial > MAX_PARTIAL)
2340 list_move(&page->lru,
2341 slabs_by_inuse + page->inuse);
2345 if (n->nr_partial <= MAX_PARTIAL)
2346 goto out;
2349 * Rebuild the partial list with the slabs filled up most
2350 * first and the least used slabs at the end.
2352 for (i = s->objects - 1; i >= 0; i--)
2353 list_splice(slabs_by_inuse + i, n->partial.prev);
2355 out:
2356 spin_unlock_irqrestore(&n->list_lock, flags);
2359 kfree(slabs_by_inuse);
2360 return 0;
2362 EXPORT_SYMBOL(kmem_cache_shrink);
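/*
 * Usage sketch ("my_cache" is hypothetical): a subsystem that has just
 * released many objects can return empty slabs to the page allocator
 * immediately instead of waiting for them to drain:
 *
 *	if (kmem_cache_shrink(my_cache))
 *		printk(KERN_WARNING "shrink failed (-ENOMEM)\n");
 */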
2365 * krealloc - reallocate memory. The contents will remain unchanged.
2366 * @p: object to reallocate memory for.
2367 * @new_size: how many bytes of memory are required.
2368 * @flags: the type of memory to allocate.
2370 * The contents of the object pointed to are preserved up to the
2371 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
2373 * %NULL pointer, the object pointed to is freed.
2375 void *krealloc(const void *p, size_t new_size, gfp_t flags)
2377 void *ret;
2378 size_t ks;
2380 if (unlikely(!p))
2381 return kmalloc(new_size, flags);
2383 if (unlikely(!new_size)) {
2384 kfree(p);
2385 return NULL;
2388 ks = ksize(p);
2389 if (ks >= new_size)
2390 return (void *)p;
2392 ret = kmalloc(new_size, flags);
2393 if (ret) {
2394 memcpy(ret, p, min(new_size, ks));
2395 kfree(p);
2397 return ret;
2399 EXPORT_SYMBOL(krealloc);
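/*
 * Usage sketch (buffer names hypothetical). On failure the old block is
 * left intact, so the old pointer must not be overwritten before the
 * result has been checked:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	(buf is still valid and must be freed)
 *	buf = new;
 */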
2401 /********************************************************************
2402 * Basic setup of slabs
2403 *******************************************************************/
2405 void __init kmem_cache_init(void)
2407 int i;
2409 #ifdef CONFIG_NUMA
 * Must first have the slab cache available for the allocations of the
 * struct kmem_cache_node structures. There is special bootstrap code in
 * kmem_cache_open for slab_state == DOWN.
2415 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2416 sizeof(struct kmem_cache_node), GFP_KERNEL);
2417 #endif
2419 /* Able to allocate the per node structures */
2420 slab_state = PARTIAL;
	/* Caches of non-power-of-two sizes: kmalloc-96 and kmalloc-192 */
2423 create_kmalloc_cache(&kmalloc_caches[1],
2424 "kmalloc-96", 96, GFP_KERNEL);
2425 create_kmalloc_cache(&kmalloc_caches[2],
2426 "kmalloc-192", 192, GFP_KERNEL);
2428 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
2429 create_kmalloc_cache(&kmalloc_caches[i],
2430 "kmalloc", 1 << i, GFP_KERNEL);
2432 slab_state = UP;
2434 /* Provide the correct kmalloc names now that the caches are up */
2435 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		kmalloc_caches[i].name =
2437 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2439 #ifdef CONFIG_SMP
2440 register_cpu_notifier(&slab_notifier);
2441 #endif
2443 kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2444 nr_cpu_ids * sizeof(struct page *);
2446 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2447 " Processors=%d, Nodes=%d\n",
2448 KMALLOC_SHIFT_HIGH, cache_line_size(),
2449 slub_min_order, slub_max_order, slub_min_objects,
2450 nr_cpu_ids, nr_node_ids);
2454 * Find a mergeable slab cache
2456 static int slab_unmergeable(struct kmem_cache *s)
2458 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2459 return 1;
2461 if (s->ctor)
2462 return 1;
2464 return 0;
2467 static struct kmem_cache *find_mergeable(size_t size,
2468 size_t align, unsigned long flags,
2469 void (*ctor)(void *, struct kmem_cache *, unsigned long))
2471 struct list_head *h;
2473 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2474 return NULL;
2476 if (ctor)
2477 return NULL;
2479 size = ALIGN(size, sizeof(void *));
2480 align = calculate_alignment(flags, align, size);
2481 size = ALIGN(size, align);
2483 list_for_each(h, &slab_caches) {
2484 struct kmem_cache *s =
2485 container_of(h, struct kmem_cache, list);
2487 if (slab_unmergeable(s))
2488 continue;
2490 if (size > s->size)
2491 continue;
2493 if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
2494 (s->flags & SLUB_MERGE_SAME))
2495 continue;
2497 * Check if alignment is compatible.
2498 * Courtesy of Adrian Drzewiecki
		if ((s->size & ~(align - 1)) != s->size)
2501 continue;
2503 if (s->size - size >= sizeof(void *))
2504 continue;
2506 return s;
2508 return NULL;
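/*
 * Worked example of find_mergeable() (numbers illustrative): a request
 * for a 100 byte cache with default alignment is padded to 104 bytes.
 * An existing cache with s->size == 104, agreeing debug flags under
 * SLUB_MERGE_SAME and compatible alignment is then returned and merely
 * gains an alias, since the slack (s->size - size = 0) stays below
 * sizeof(void *).
 */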
2511 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2512 size_t align, unsigned long flags,
2513 void (*ctor)(void *, struct kmem_cache *, unsigned long),
2514 void (*dtor)(void *, struct kmem_cache *, unsigned long))
2516 struct kmem_cache *s;
2518 BUG_ON(dtor);
2519 down_write(&slub_lock);
2520 s = find_mergeable(size, align, flags, ctor);
2521 if (s) {
2522 s->refcount++;
2524 * Adjust the object sizes so that we clear
2525 * the complete object on kzalloc.
2527 s->objsize = max(s->objsize, (int)size);
2528 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
2529 if (sysfs_slab_alias(s, name))
2530 goto err;
2531 } else {
2532 s = kmalloc(kmem_size, GFP_KERNEL);
2533 if (s && kmem_cache_open(s, GFP_KERNEL, name,
2534 size, align, flags, ctor)) {
2535 if (sysfs_slab_add(s)) {
2536 kfree(s);
2537 goto err;
2539 list_add(&s->list, &slab_caches);
2540 } else
2541 kfree(s);
2543 up_write(&slub_lock);
2544 return s;
2546 err:
2547 up_write(&slub_lock);
2548 if (flags & SLAB_PANIC)
2549 panic("Cannot create slabcache %s\n", name);
2550 else
2551 s = NULL;
2552 return s;
2554 EXPORT_SYMBOL(kmem_cache_create);
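/*
 * Usage sketch ("struct foo", foo_ctor and foo_cache are hypothetical).
 * Destructor support has been removed, so the last argument must now
 * always be NULL:
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *s,
 *						unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *					0, SLAB_HWCACHE_ALIGN,
 *					foo_ctor, NULL);
 */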
2556 void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
2558 void *x;
2560 x = slab_alloc(s, flags, -1, __builtin_return_address(0));
2561 if (x)
2562 memset(x, 0, s->objsize);
2563 return x;
2565 EXPORT_SYMBOL(kmem_cache_zalloc);
2567 #ifdef CONFIG_SMP
2568 static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
2570 struct list_head *h;
2572 down_read(&slub_lock);
2573 list_for_each(h, &slab_caches) {
2574 struct kmem_cache *s =
2575 container_of(h, struct kmem_cache, list);
2577 func(s, cpu);
2579 up_read(&slub_lock);
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2584 * necessary.
2586 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2587 unsigned long action, void *hcpu)
2589 long cpu = (long)hcpu;
2591 switch (action) {
2592 case CPU_UP_CANCELED:
2593 case CPU_UP_CANCELED_FROZEN:
2594 case CPU_DEAD:
2595 case CPU_DEAD_FROZEN:
2596 for_all_slabs(__flush_cpu_slab, cpu);
2597 break;
2598 default:
2599 break;
2601 return NOTIFY_OK;
2604 static struct notifier_block __cpuinitdata slab_notifier =
2605 { &slab_cpuup_callback, NULL, 0 };
2607 #endif
2609 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2611 struct kmem_cache *s = get_slab(size, gfpflags);
2613 if (!s)
2614 return NULL;
2616 return slab_alloc(s, gfpflags, -1, caller);
2619 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2620 int node, void *caller)
2622 struct kmem_cache *s = get_slab(size, gfpflags);
2624 if (!s)
2625 return NULL;
2627 return slab_alloc(s, gfpflags, node, caller);
2630 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
2631 static int validate_slab(struct kmem_cache *s, struct page *page)
2633 void *p;
2634 void *addr = page_address(page);
2635 DECLARE_BITMAP(map, s->objects);
2637 if (!check_slab(s, page) ||
2638 !on_freelist(s, page, NULL))
2639 return 0;
2641 /* Now we know that a valid freelist exists */
2642 bitmap_zero(map, s->objects);
2644 for_each_free_object(p, s, page->freelist) {
2645 set_bit(slab_index(p, s, addr), map);
2646 if (!check_object(s, page, p, 0))
2647 return 0;
2650 for_each_object(p, s, addr)
2651 if (!test_bit(slab_index(p, s, addr), map))
2652 if (!check_object(s, page, p, 1))
2653 return 0;
2654 return 1;
2657 static void validate_slab_slab(struct kmem_cache *s, struct page *page)
2659 if (slab_trylock(page)) {
2660 validate_slab(s, page);
2661 slab_unlock(page);
2662 } else
2663 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
2664 s->name, page);
2666 if (s->flags & DEBUG_DEFAULT_FLAGS) {
2667 if (!SlabDebug(page))
2668 printk(KERN_ERR "SLUB %s: SlabDebug not set "
2669 "on slab 0x%p\n", s->name, page);
2670 } else {
2671 if (SlabDebug(page))
2672 printk(KERN_ERR "SLUB %s: SlabDebug set on "
2673 "slab 0x%p\n", s->name, page);
2677 static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
2679 unsigned long count = 0;
2680 struct page *page;
2681 unsigned long flags;
2683 spin_lock_irqsave(&n->list_lock, flags);
2685 list_for_each_entry(page, &n->partial, lru) {
2686 validate_slab_slab(s, page);
2687 count++;
2689 if (count != n->nr_partial)
2690 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
2691 "counter=%ld\n", s->name, count, n->nr_partial);
2693 if (!(s->flags & SLAB_STORE_USER))
2694 goto out;
2696 list_for_each_entry(page, &n->full, lru) {
2697 validate_slab_slab(s, page);
2698 count++;
2700 if (count != atomic_long_read(&n->nr_slabs))
2701 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
2702 "counter=%ld\n", s->name, count,
2703 atomic_long_read(&n->nr_slabs));
2705 out:
2706 spin_unlock_irqrestore(&n->list_lock, flags);
2707 return count;
2710 static unsigned long validate_slab_cache(struct kmem_cache *s)
2712 int node;
2713 unsigned long count = 0;
2715 flush_all(s);
2716 for_each_online_node(node) {
2717 struct kmem_cache_node *n = get_node(s, node);
2719 count += validate_slab_node(s, n);
2721 return count;
2724 #ifdef SLUB_RESILIENCY_TEST
2725 static void resiliency_test(void)
2727 u8 *p;
2729 printk(KERN_ERR "SLUB resiliency testing\n");
2730 printk(KERN_ERR "-----------------------\n");
2731 printk(KERN_ERR "A. Corruption after allocation\n");
2733 p = kzalloc(16, GFP_KERNEL);
2734 p[16] = 0x12;
2735 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
2736 " 0x12->0x%p\n\n", p + 16);
2738 validate_slab_cache(kmalloc_caches + 4);
2740 /* Hmmm... The next two are dangerous */
2741 p = kzalloc(32, GFP_KERNEL);
2742 p[32 + sizeof(void *)] = 0x34;
	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
			" 0x34 -> 0x%p\n", p);
2745 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2747 validate_slab_cache(kmalloc_caches + 5);
2748 p = kzalloc(64, GFP_KERNEL);
2749 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
2750 *p = 0x56;
	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
			p);
2753 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2754 validate_slab_cache(kmalloc_caches + 6);
2756 printk(KERN_ERR "\nB. Corruption after free\n");
2757 p = kzalloc(128, GFP_KERNEL);
2758 kfree(p);
2759 *p = 0x78;
2760 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
2761 validate_slab_cache(kmalloc_caches + 7);
2763 p = kzalloc(256, GFP_KERNEL);
2764 kfree(p);
2765 p[50] = 0x9a;
2766 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
2767 validate_slab_cache(kmalloc_caches + 8);
2769 p = kzalloc(512, GFP_KERNEL);
2770 kfree(p);
2771 p[512] = 0xab;
2772 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
2773 validate_slab_cache(kmalloc_caches + 9);
2775 #else
static void resiliency_test(void) {}
2777 #endif
2780 * Generate lists of code addresses where slabcache objects are allocated
2781 * and freed.
2784 struct location {
2785 unsigned long count;
2786 void *addr;
2787 long long sum_time;
2788 long min_time;
2789 long max_time;
2790 long min_pid;
2791 long max_pid;
2792 cpumask_t cpus;
2793 nodemask_t nodes;
2796 struct loc_track {
2797 unsigned long max;
2798 unsigned long count;
2799 struct location *loc;
2802 static void free_loc_track(struct loc_track *t)
2804 if (t->max)
2805 free_pages((unsigned long)t->loc,
2806 get_order(sizeof(struct location) * t->max));
2809 static int alloc_loc_track(struct loc_track *t, unsigned long max)
2811 struct location *l;
2812 int order;
2814 if (!max)
2815 max = PAGE_SIZE / sizeof(struct location);
2817 order = get_order(sizeof(struct location) * max);
2819 l = (void *)__get_free_pages(GFP_KERNEL, order);
2821 if (!l)
2822 return 0;
2824 if (t->count) {
2825 memcpy(l, t->loc, sizeof(struct location) * t->count);
2826 free_loc_track(t);
2828 t->max = max;
2829 t->loc = l;
2830 return 1;
2833 static int add_location(struct loc_track *t, struct kmem_cache *s,
2834 const struct track *track)
2836 long start, end, pos;
2837 struct location *l;
2838 void *caddr;
2839 unsigned long age = jiffies - track->when;
2841 start = -1;
2842 end = t->count;
2844 for ( ; ; ) {
2845 pos = start + (end - start + 1) / 2;
		 * There is nothing at "end". If we end up there
		 * we need to insert the new element before "end".
2851 if (pos == end)
2852 break;
2854 caddr = t->loc[pos].addr;
2855 if (track->addr == caddr) {
2857 l = &t->loc[pos];
2858 l->count++;
2859 if (track->when) {
2860 l->sum_time += age;
2861 if (age < l->min_time)
2862 l->min_time = age;
2863 if (age > l->max_time)
2864 l->max_time = age;
2866 if (track->pid < l->min_pid)
2867 l->min_pid = track->pid;
2868 if (track->pid > l->max_pid)
2869 l->max_pid = track->pid;
2871 cpu_set(track->cpu, l->cpus);
2873 node_set(page_to_nid(virt_to_page(track)), l->nodes);
2874 return 1;
2877 if (track->addr < caddr)
2878 end = pos;
2879 else
2880 start = pos;
2884 * Not found. Insert new tracking element.
2886 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
2887 return 0;
2889 l = t->loc + pos;
2890 if (pos < t->count)
2891 memmove(l + 1, l,
2892 (t->count - pos) * sizeof(struct location));
2893 t->count++;
2894 l->count = 1;
2895 l->addr = track->addr;
2896 l->sum_time = age;
2897 l->min_time = age;
2898 l->max_time = age;
2899 l->min_pid = track->pid;
2900 l->max_pid = track->pid;
2901 cpus_clear(l->cpus);
2902 cpu_set(track->cpu, l->cpus);
2903 nodes_clear(l->nodes);
2904 node_set(page_to_nid(virt_to_page(track)), l->nodes);
2905 return 1;
2908 static void process_slab(struct loc_track *t, struct kmem_cache *s,
2909 struct page *page, enum track_item alloc)
2911 void *addr = page_address(page);
2912 DECLARE_BITMAP(map, s->objects);
2913 void *p;
2915 bitmap_zero(map, s->objects);
2916 for_each_free_object(p, s, page->freelist)
2917 set_bit(slab_index(p, s, addr), map);
2919 for_each_object(p, s, addr)
2920 if (!test_bit(slab_index(p, s, addr), map))
2921 add_location(t, s, get_track(s, p, alloc));
2924 static int list_locations(struct kmem_cache *s, char *buf,
2925 enum track_item alloc)
2927 int n = 0;
2928 unsigned long i;
2929 struct loc_track t;
2930 int node;
2932 t.count = 0;
2933 t.max = 0;
2935 /* Push back cpu slabs */
2936 flush_all(s);
2938 for_each_online_node(node) {
		struct kmem_cache_node *kn = get_node(s, node);
		unsigned long flags;
		struct page *page;

		if (!atomic_long_read(&kn->nr_slabs))
			continue;

		spin_lock_irqsave(&kn->list_lock, flags);
		list_for_each_entry(page, &kn->partial, lru)
			process_slab(&t, s, page, alloc);
		list_for_each_entry(page, &kn->full, lru)
			process_slab(&t, s, page, alloc);
		spin_unlock_irqrestore(&kn->list_lock, flags);
2954 for (i = 0; i < t.count; i++) {
2955 struct location *l = &t.loc[i];
2957 if (n > PAGE_SIZE - 100)
2958 break;
2959 n += sprintf(buf + n, "%7ld ", l->count);
2961 if (l->addr)
2962 n += sprint_symbol(buf + n, (unsigned long)l->addr);
2963 else
2964 n += sprintf(buf + n, "<not-available>");
2966 if (l->sum_time != l->min_time) {
2967 unsigned long remainder;
2969 n += sprintf(buf + n, " age=%ld/%ld/%ld",
2970 l->min_time,
2971 div_long_long_rem(l->sum_time, l->count, &remainder),
2972 l->max_time);
2973 } else
2974 n += sprintf(buf + n, " age=%ld",
2975 l->min_time);
2977 if (l->min_pid != l->max_pid)
2978 n += sprintf(buf + n, " pid=%ld-%ld",
2979 l->min_pid, l->max_pid);
2980 else
2981 n += sprintf(buf + n, " pid=%ld",
2982 l->min_pid);
2984 if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) {
2985 n += sprintf(buf + n, " cpus=");
2986 n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
2987 l->cpus);
2990 if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) {
2991 n += sprintf(buf + n, " nodes=");
2992 n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
2993 l->nodes);
2996 n += sprintf(buf + n, "\n");
2999 free_loc_track(&t);
3000 if (!t.count)
3001 n += sprintf(buf, "No data\n");
3002 return n;
3005 static unsigned long count_partial(struct kmem_cache_node *n)
3007 unsigned long flags;
3008 unsigned long x = 0;
3009 struct page *page;
3011 spin_lock_irqsave(&n->list_lock, flags);
3012 list_for_each_entry(page, &n->partial, lru)
3013 x += page->inuse;
3014 spin_unlock_irqrestore(&n->list_lock, flags);
3015 return x;
3018 enum slab_stat_type {
3019 SL_FULL,
3020 SL_PARTIAL,
3021 SL_CPU,
3022 SL_OBJECTS
3025 #define SO_FULL (1 << SL_FULL)
3026 #define SO_PARTIAL (1 << SL_PARTIAL)
3027 #define SO_CPU (1 << SL_CPU)
3028 #define SO_OBJECTS (1 << SL_OBJECTS)
3030 static unsigned long slab_objects(struct kmem_cache *s,
3031 char *buf, unsigned long flags)
3033 unsigned long total = 0;
3034 int cpu;
3035 int node;
3036 int x;
3037 unsigned long *nodes;
3038 unsigned long *per_cpu;
	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;
3043 for_each_possible_cpu(cpu) {
3044 struct page *page = s->cpu_slab[cpu];
3045 int node;
3047 if (page) {
3048 node = page_to_nid(page);
3049 if (flags & SO_CPU) {
3050 int x = 0;
3052 if (flags & SO_OBJECTS)
3053 x = page->inuse;
3054 else
3055 x = 1;
3056 total += x;
3057 nodes[node] += x;
3059 per_cpu[node]++;
3063 for_each_online_node(node) {
3064 struct kmem_cache_node *n = get_node(s, node);
3066 if (flags & SO_PARTIAL) {
3067 if (flags & SO_OBJECTS)
3068 x = count_partial(n);
3069 else
3070 x = n->nr_partial;
3071 total += x;
3072 nodes[node] += x;
3075 if (flags & SO_FULL) {
			int full_slabs = atomic_long_read(&n->nr_slabs)
3077 - per_cpu[node]
3078 - n->nr_partial;
3080 if (flags & SO_OBJECTS)
3081 x = full_slabs * s->objects;
3082 else
3083 x = full_slabs;
3084 total += x;
3085 nodes[node] += x;
3089 x = sprintf(buf, "%lu", total);
3090 #ifdef CONFIG_NUMA
3091 for_each_online_node(node)
3092 if (nodes[node])
3093 x += sprintf(buf + x, " N%d=%lu",
3094 node, nodes[node]);
3095 #endif
3096 kfree(nodes);
3097 return x + sprintf(buf + x, "\n");
3100 static int any_slab_objects(struct kmem_cache *s)
3102 int node;
3103 int cpu;
3105 for_each_possible_cpu(cpu)
3106 if (s->cpu_slab[cpu])
3107 return 1;
3109 for_each_node(node) {
3110 struct kmem_cache_node *n = get_node(s, node);
		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
3113 return 1;
3115 return 0;
3118 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3121 struct slab_attribute {
3122 struct attribute attr;
3123 ssize_t (*show)(struct kmem_cache *s, char *buf);
3124 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3127 #define SLAB_ATTR_RO(_name) \
3128 static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3130 #define SLAB_ATTR(_name) \
3131 static struct slab_attribute _name##_attr = \
3132 __ATTR(_name, 0644, _name##_show, _name##_store)
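/*
 * For reference, SLAB_ATTR(trace) below expands to roughly:
 *
 *	static struct slab_attribute trace_attr =
 *		__ATTR(trace, 0644, trace_show, trace_store);
 *
 * tying a show/store pair to a mode 0644 sysfs attribute of the same
 * name.
 */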
3134 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3136 return sprintf(buf, "%d\n", s->size);
3138 SLAB_ATTR_RO(slab_size);
3140 static ssize_t align_show(struct kmem_cache *s, char *buf)
3142 return sprintf(buf, "%d\n", s->align);
3144 SLAB_ATTR_RO(align);
3146 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3148 return sprintf(buf, "%d\n", s->objsize);
3150 SLAB_ATTR_RO(object_size);
3152 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3154 return sprintf(buf, "%d\n", s->objects);
3156 SLAB_ATTR_RO(objs_per_slab);
3158 static ssize_t order_show(struct kmem_cache *s, char *buf)
3160 return sprintf(buf, "%d\n", s->order);
3162 SLAB_ATTR_RO(order);
3164 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3166 if (s->ctor) {
3167 int n = sprint_symbol(buf, (unsigned long)s->ctor);
3169 return n + sprintf(buf + n, "\n");
3171 return 0;
3173 SLAB_ATTR_RO(ctor);
3175 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3177 return sprintf(buf, "%d\n", s->refcount - 1);
3179 SLAB_ATTR_RO(aliases);
3181 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3183 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3185 SLAB_ATTR_RO(slabs);
3187 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3189 return slab_objects(s, buf, SO_PARTIAL);
3191 SLAB_ATTR_RO(partial);
3193 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3195 return slab_objects(s, buf, SO_CPU);
3197 SLAB_ATTR_RO(cpu_slabs);
3199 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3201 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3203 SLAB_ATTR_RO(objects);
3205 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3207 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3210 static ssize_t sanity_checks_store(struct kmem_cache *s,
3211 const char *buf, size_t length)
3213 s->flags &= ~SLAB_DEBUG_FREE;
3214 if (buf[0] == '1')
3215 s->flags |= SLAB_DEBUG_FREE;
3216 return length;
3218 SLAB_ATTR(sanity_checks);
3220 static ssize_t trace_show(struct kmem_cache *s, char *buf)
3222 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3225 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3226 size_t length)
3228 s->flags &= ~SLAB_TRACE;
3229 if (buf[0] == '1')
3230 s->flags |= SLAB_TRACE;
3231 return length;
3233 SLAB_ATTR(trace);
3235 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3237 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3240 static ssize_t reclaim_account_store(struct kmem_cache *s,
3241 const char *buf, size_t length)
3243 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3244 if (buf[0] == '1')
3245 s->flags |= SLAB_RECLAIM_ACCOUNT;
3246 return length;
3248 SLAB_ATTR(reclaim_account);
3250 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3252 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3254 SLAB_ATTR_RO(hwcache_align);
3256 #ifdef CONFIG_ZONE_DMA
3257 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3259 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3261 SLAB_ATTR_RO(cache_dma);
3262 #endif
3264 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3266 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3268 SLAB_ATTR_RO(destroy_by_rcu);
3270 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3272 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3275 static ssize_t red_zone_store(struct kmem_cache *s,
3276 const char *buf, size_t length)
3278 if (any_slab_objects(s))
3279 return -EBUSY;
3281 s->flags &= ~SLAB_RED_ZONE;
3282 if (buf[0] == '1')
3283 s->flags |= SLAB_RED_ZONE;
3284 calculate_sizes(s);
3285 return length;
3287 SLAB_ATTR(red_zone);
3289 static ssize_t poison_show(struct kmem_cache *s, char *buf)
3291 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3294 static ssize_t poison_store(struct kmem_cache *s,
3295 const char *buf, size_t length)
3297 if (any_slab_objects(s))
3298 return -EBUSY;
3300 s->flags &= ~SLAB_POISON;
3301 if (buf[0] == '1')
3302 s->flags |= SLAB_POISON;
3303 calculate_sizes(s);
3304 return length;
3306 SLAB_ATTR(poison);
3308 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3310 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3313 static ssize_t store_user_store(struct kmem_cache *s,
3314 const char *buf, size_t length)
3316 if (any_slab_objects(s))
3317 return -EBUSY;
3319 s->flags &= ~SLAB_STORE_USER;
3320 if (buf[0] == '1')
3321 s->flags |= SLAB_STORE_USER;
3322 calculate_sizes(s);
3323 return length;
3325 SLAB_ATTR(store_user);
3327 static ssize_t validate_show(struct kmem_cache *s, char *buf)
3329 return 0;
3332 static ssize_t validate_store(struct kmem_cache *s,
3333 const char *buf, size_t length)
3335 if (buf[0] == '1')
3336 validate_slab_cache(s);
3337 else
3338 return -EINVAL;
3339 return length;
3341 SLAB_ATTR(validate);
3343 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3345 return 0;
3348 static ssize_t shrink_store(struct kmem_cache *s,
3349 const char *buf, size_t length)
3351 if (buf[0] == '1') {
3352 int rc = kmem_cache_shrink(s);
3354 if (rc)
3355 return rc;
3356 } else
3357 return -EINVAL;
3358 return length;
3360 SLAB_ATTR(shrink);
3362 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3364 if (!(s->flags & SLAB_STORE_USER))
3365 return -ENOSYS;
3366 return list_locations(s, buf, TRACK_ALLOC);
3368 SLAB_ATTR_RO(alloc_calls);
3370 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3372 if (!(s->flags & SLAB_STORE_USER))
3373 return -ENOSYS;
3374 return list_locations(s, buf, TRACK_FREE);
3376 SLAB_ATTR_RO(free_calls);
3378 #ifdef CONFIG_NUMA
3379 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
3381 return sprintf(buf, "%d\n", s->defrag_ratio / 10);
3384 static ssize_t defrag_ratio_store(struct kmem_cache *s,
3385 const char *buf, size_t length)
3387 int n = simple_strtoul(buf, NULL, 10);
3389 if (n < 100)
3390 s->defrag_ratio = n * 10;
3391 return length;
3393 SLAB_ATTR(defrag_ratio);
3394 #endif
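/*
 * All attributes defined above surface under /sys/slab/<cache>/. A few
 * illustrative interactions from user space:
 *
 *	cat /sys/slab/kmalloc-128/objects
 *	echo 1 > /sys/slab/kmalloc-128/validate
 *	echo 1 > /sys/slab/kmalloc-128/shrink
 */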
3396 static struct attribute * slab_attrs[] = {
3397 &slab_size_attr.attr,
3398 &object_size_attr.attr,
3399 &objs_per_slab_attr.attr,
3400 &order_attr.attr,
3401 &objects_attr.attr,
3402 &slabs_attr.attr,
3403 &partial_attr.attr,
3404 &cpu_slabs_attr.attr,
3405 &ctor_attr.attr,
3406 &aliases_attr.attr,
3407 &align_attr.attr,
3408 &sanity_checks_attr.attr,
3409 &trace_attr.attr,
3410 &hwcache_align_attr.attr,
3411 &reclaim_account_attr.attr,
3412 &destroy_by_rcu_attr.attr,
3413 &red_zone_attr.attr,
3414 &poison_attr.attr,
3415 &store_user_attr.attr,
3416 &validate_attr.attr,
3417 &shrink_attr.attr,
3418 &alloc_calls_attr.attr,
3419 &free_calls_attr.attr,
3420 #ifdef CONFIG_ZONE_DMA
3421 &cache_dma_attr.attr,
3422 #endif
3423 #ifdef CONFIG_NUMA
3424 &defrag_ratio_attr.attr,
3425 #endif
3426 NULL
3429 static struct attribute_group slab_attr_group = {
3430 .attrs = slab_attrs,
3433 static ssize_t slab_attr_show(struct kobject *kobj,
3434 struct attribute *attr,
3435 char *buf)
3437 struct slab_attribute *attribute;
3438 struct kmem_cache *s;
3439 int err;
3441 attribute = to_slab_attr(attr);
3442 s = to_slab(kobj);
3444 if (!attribute->show)
3445 return -EIO;
3447 err = attribute->show(s, buf);
3449 return err;
3452 static ssize_t slab_attr_store(struct kobject *kobj,
3453 struct attribute *attr,
3454 const char *buf, size_t len)
3456 struct slab_attribute *attribute;
3457 struct kmem_cache *s;
3458 int err;
3460 attribute = to_slab_attr(attr);
3461 s = to_slab(kobj);
3463 if (!attribute->store)
3464 return -EIO;
3466 err = attribute->store(s, buf, len);
3468 return err;
3471 static struct sysfs_ops slab_sysfs_ops = {
3472 .show = slab_attr_show,
3473 .store = slab_attr_store,
3476 static struct kobj_type slab_ktype = {
3477 .sysfs_ops = &slab_sysfs_ops,
3480 static int uevent_filter(struct kset *kset, struct kobject *kobj)
3482 struct kobj_type *ktype = get_ktype(kobj);
3484 if (ktype == &slab_ktype)
3485 return 1;
3486 return 0;
3489 static struct kset_uevent_ops slab_uevent_ops = {
3490 .filter = uevent_filter,
3493 decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
3495 #define ID_STR_LENGTH 64
3497 /* Create a unique string id for a slab cache:
3498 * format
 * :[flags-]size
3501 static char *create_unique_id(struct kmem_cache *s)
3503 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
3504 char *p = name;
3506 BUG_ON(!name);
3508 *p++ = ':';
3510 * First flags affecting slabcache operations. We will only
3511 * get here for aliasable slabs so we do not need to support
3512 * too many flags. The flags here must cover all flags that
3513 * are matched during merging to guarantee that the id is
3514 * unique.
3516 if (s->flags & SLAB_CACHE_DMA)
3517 *p++ = 'd';
3518 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3519 *p++ = 'a';
3520 if (s->flags & SLAB_DEBUG_FREE)
3521 *p++ = 'F';
3522 if (p != name + 1)
3523 *p++ = '-';
3524 p += sprintf(p, "%07d", s->size);
3525 BUG_ON(p > name + ID_STR_LENGTH - 1);
3526 return name;
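/*
 * Examples of the ids generated by create_unique_id() (illustrative):
 * a DMA cache of size 192 becomes ":d-0000192", a plain cache of size
 * 96 just ":0000096".
 */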
3529 static int sysfs_slab_add(struct kmem_cache *s)
3531 int err;
3532 const char *name;
3533 int unmergeable;
3535 if (slab_state < SYSFS)
		/* Defer until slab_sysfs_init() has run */
3537 return 0;
3539 unmergeable = slab_unmergeable(s);
3540 if (unmergeable) {
3542 * Slabcache can never be merged so we can use the name proper.
3543 * This is typically the case for debug situations. In that
3544 * case we can catch duplicate names easily.
3546 sysfs_remove_link(&slab_subsys.kobj, s->name);
3547 name = s->name;
3548 } else {
3550 * Create a unique name for the slab as a target
3551 * for the symlinks.
3553 name = create_unique_id(s);
3556 kobj_set_kset_s(s, slab_subsys);
3557 kobject_set_name(&s->kobj, name);
3558 kobject_init(&s->kobj);
3559 err = kobject_add(&s->kobj);
3560 if (err)
3561 return err;
3563 err = sysfs_create_group(&s->kobj, &slab_attr_group);
3564 if (err)
3565 return err;
3566 kobject_uevent(&s->kobj, KOBJ_ADD);
3567 if (!unmergeable) {
3568 /* Setup first alias */
3569 sysfs_slab_alias(s, s->name);
3570 kfree(name);
3572 return 0;
3575 static void sysfs_slab_remove(struct kmem_cache *s)
3577 kobject_uevent(&s->kobj, KOBJ_REMOVE);
3578 kobject_del(&s->kobj);
3582 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
3585 struct saved_alias {
3586 struct kmem_cache *s;
3587 const char *name;
3588 struct saved_alias *next;
3591 struct saved_alias *alias_list;
3593 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3595 struct saved_alias *al;
3597 if (slab_state == SYSFS) {
3599 * If we have a leftover link then remove it.
3601 sysfs_remove_link(&slab_subsys.kobj, name);
3602 return sysfs_create_link(&slab_subsys.kobj,
3603 &s->kobj, name);
3606 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
3607 if (!al)
3608 return -ENOMEM;
3610 al->s = s;
3611 al->name = name;
3612 al->next = alias_list;
3613 alias_list = al;
3614 return 0;
3617 static int __init slab_sysfs_init(void)
3619 struct list_head *h;
3620 int err;
3622 err = subsystem_register(&slab_subsys);
3623 if (err) {
3624 printk(KERN_ERR "Cannot register slab subsystem.\n");
3625 return -ENOSYS;
3628 slab_state = SYSFS;
3630 list_for_each(h, &slab_caches) {
3631 struct kmem_cache *s =
3632 container_of(h, struct kmem_cache, list);
3634 err = sysfs_slab_add(s);
3635 BUG_ON(err);
3638 while (alias_list) {
3639 struct saved_alias *al = alias_list;
3641 alias_list = alias_list->next;
3642 err = sysfs_slab_alias(al->s, al->name);
3643 BUG_ON(err);
3644 kfree(al);
3647 resiliency_test();
3648 return 0;
3651 __initcall(slab_sysfs_init);
3652 #endif