#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to the next available object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the current page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
};

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	struct list_head full;
#endif
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int order;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(struct kmem_cache *, void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};
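
/*
 * Illustrative sketch, not part of this header: a small helper along
 * these lines (it lives in mm/slub.c, not here) is what resolves the
 * CONFIG_SMP split above into the per cpu state for a given processor:
 *
 *	static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s,
 *								int cpu)
 *	{
 *	#ifdef CONFIG_SMP
 *		return s->cpu_slab[cpu];
 *	#else
 *		return &s->cpu_slab;
 *	#endif
 *	}
 */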

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
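
/*
 * For example, a 100 byte allocation is served from kmalloc_caches[7],
 * the 2^7 = 128 byte cache; see kmalloc_index() below for the exact
 * mapping, including the two non power of two caches for 96 and 192
 * bytes.
 */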

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
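
/*
 * A few concrete values, derived from the ladder above as a sanity
 * check: kmalloc_index(0) == 0, kmalloc_index(96) == 1,
 * kmalloc_index(100) == 7 and kmalloc_index(4096) == 12.
 */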

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}
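
/*
 * Example (illustrative only): kmalloc_slab(200) computes
 * kmalloc_index(200) == 8 and returns &kmalloc_caches[8], the 256 byte
 * cache. For size 0 the index is 0 and NULL comes back, which the
 * callers below translate into ZERO_SIZE_PTR.
 */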

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > PAGE_SIZE / 2)
			return (void *)__get_free_pages(flags | __GFP_COMP,
							get_order(size));

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc(s, flags);
		}
	}
	return __kmalloc(size, flags);
}
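
/*
 * Illustrative caller, assumed for the example and not part of this
 * header:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *
 * Since 64 is a compile-time constant, __builtin_constant_p() is true
 * and the branches above fold away, so the call compiles down to
 * kmem_cache_alloc(&kmalloc_caches[6], GFP_KERNEL) (assuming
 * KMALLOC_MIN_SIZE <= 64) with no runtime size-to-cache lookup.
 */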

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
			size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
#endif
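
/*
 * Illustrative NUMA-aware caller, assumed for the example and not part
 * of this header:
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL,
 *					cpu_to_node(cpu));
 *
 * This takes the same constant folding fast path as kmalloc() but
 * allocates from a slab on the requested node.
 */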

#endif /* _LINUX_SLUB_DEF_H */