nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/mm/slob.c
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

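/*
 * A rough worked example of the routing described above, assuming 4 KB
 * pages and a 4-byte kmalloc header/minimum alignment (typical 32-bit
 * settings; the exact numbers are architecture dependent):
 *
 *	kmalloc(100)  -> slob_alloc(104)  -> free_slob_small  (104 < 256)
 *	kmalloc(600)  -> slob_alloc(604)  -> free_slob_medium (604 < 1024)
 *	kmalloc(3000) -> slob_alloc(3004) -> free_slob_large
 *	kmalloc(5000) -> alloc_pages() with an order-1 compound page,
 *			 page->private = 5000 so that ksize() can report it
 */
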
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <trace/kmemtrace.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

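/*
 * Worked example of the encoding above (a sketch for the common s16 case,
 * where one SLOB_UNIT is 2 bytes and offsets are counted in units from the
 * start of the page):
 *
 *	A free block of 5 units at unit index 10, whose next free block
 *	starts at unit index 40:
 *		block[10].units =  5;	size in the first unit
 *		block[11].units = 40;	offset of the next free block
 *
 *	A free block of a single unit at index 100, next free block at 200:
 *		block[100].units = -200;	negative: offset of next only
 *
 *	slob_last() reports the end of the list when the decoded next
 *	pointer lands on a page boundary, e.g. base + SLOB_UNITS(PAGE_SIZE)
 *	as set up for a freshly added page.
 */
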
/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return PageSlobPage((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlobPage((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlobPage((struct page *)sp);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

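/*
 * Sketch of what these macros work out to in the common configuration
 * where slobidx_t is s16, i.e. SLOB_UNIT == 2 bytes:
 *
 *	SLOB_UNITS(1)    == 1		every allocation takes at least one unit
 *	SLOB_UNITS(3)    == 2		sizes are rounded up to whole units
 *	SLOB_UNITS(100)  == 50
 *	SLOB_UNITS(4096) == 2048	a completely free 4 KB page
 */
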
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

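/*
 * Example of the head fragmentation above (a sketch with 2-byte units):
 * say the scan reaches a 30-unit free block starting at byte offset 6
 * within the page and the caller asked for 8-byte alignment.
 *
 *	aligned = ALIGN(offset 6, 8) = offset 8, i.e. one slob_t further on
 *	delta   = 1 unit
 *
 * If units + delta still fits, the one-unit head is split off as its own
 * free block via set_slob(cur, delta, aligned), and the allocation then
 * proceeds from the aligned position with avail - delta = 29 units.
 */
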
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return 0;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

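/*
 * Coalescing example for the reinsertion path above (unit indices within
 * one page, a sketch): the page already has free blocks at index 10
 * (5 units) and index 40 (8 units), and a 25-unit block at index 15 is
 * now freed.
 *
 *	b = 15 lies above sp->free = 10, so the list is walked: prev = 10,
 *	next = 40.
 *	b + units == next (15 + 25 == 40), so b absorbs the 8-unit block
 *	and becomes 33 units.
 *	prev + slob_units(prev) == b (10 + 5 == 15), so prev absorbs b:
 *	a single 38-unit free block remains at index 10.
 */
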
/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		ret = slob_new_page(gfp | __GFP_COMP, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

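/*
 * Layout sketch tying __kmalloc_node(), kfree() and ksize() together,
 * assuming a 4-byte minimum alignment (align == 4):
 *
 *	kmalloc(100) asks slob_alloc() for 104 bytes and gets back m;
 *	*m = 100 is stored in the first 4 bytes and m + 4 is returned.
 *
 *	kfree(p) steps back 4 bytes, reads the stored size and calls
 *	slob_free(p - 4, 100 + 4).
 *
 *	ksize(p) reads the same header and reports the usable size,
 *	rounded up to whole SLOB units.
 *
 * Requests of PAGE_SIZE - align bytes and up bypass this header entirely;
 * their size lives in page->private of the compound page instead.
 */
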
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_page(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

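/*
 * Footer layout sketch for SLAB_DESTROY_BY_RCU caches (object size 20 is
 * illustrative; sizeof(struct slob_rcu) is architecture dependent): for a
 * cache created with size 20, kmem_cache_create() grows c->size to
 * 20 + sizeof(struct slob_rcu), so every object carries room for:
 *
 *	[ 0 .. 19 ]		object payload
 *	[ 20 .. c->size - 1 ]	struct slob_rcu { rcu_head, size }
 *
 * kmem_cache_free() fills the footer in place and hands it to call_rcu();
 * after the grace period, kmem_rcu_free() subtracts the payload size back
 * out of the footer address to recover b and frees the whole block.
 */
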
unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}
static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}
void __init kmem_cache_init(void)
{
	slob_ready = 1;
}