/* include/linux/slab_def.h */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
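/*
 * Illustrative example (not part of the original header; "buf" and "len" are
 * hypothetical caller variables): with a compile-time constant size the
 * inline kmalloc() defined below picks the matching general cache at build
 * time, while a runtime size falls back to __kmalloc():
 *
 *	buf = kmalloc(128, GFP_KERNEL);	// constant size, cache chosen at compile time
 *	buf = kmalloc(len, GFP_KERNEL);	// runtime size, handled by __kmalloc()
 */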
#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>
#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
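/*
 * Hedged sketch (an assumption, not taken from this file): an architecture
 * that performs DMA into kmalloc()ed buffers would typically raise the
 * minimum alignment from its own <asm/cache.h>, e.g.
 *
 *	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 */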
#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
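/*
 * Hedged sketch (assumption): an architecture that faults on misaligned
 * accesses could likewise force a minimum alignment for every cache, e.g.
 *
 *	#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long)
 */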
/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
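/*
 * Illustrative note (a sketch based on the comment above, not code from this
 * header): keeping nodelists[] last lets the slab setup code allocate the
 * descriptor with only nr_node_ids slots, roughly
 *
 *	size = offsetof(struct kmem_cache, nodelists) +
 *	       nr_node_ids * sizeof(struct kmem_list3 *);
 */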
/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
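/*
 * Illustrative layout (an assumption for clarity, not the literal
 * definition): malloc_sizes[] has one entry per CACHE(x) line in
 * <linux/kmalloc_sizes.h>, conceptually
 *
 *	{ .cs_size = 32,  .cs_cachep = <size-32 cache> },
 *	{ .cs_size = 64,  .cs_cachep = <size-64 cache> },
 *	...
 */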
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
/* Without tracing, the _notrace variant is just the plain allocation. */
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * Walk the CACHE(x) entries from kmalloc_sizes.h to find the
		 * index of the smallest general cache that fits "size".
		 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
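/*
 * Illustrative expansion (a sketch, not compiler output): with the CACHE(x)
 * definition above, including <linux/kmalloc_sizes.h> unrolls into a chain of
 * comparisons along the lines of
 *
 *	if (size <= 32) goto found; else i++;
 *	if (size <= 64) goto found; else i++;
 *	...
 *
 * so for a constant size the compiler folds the chain into a fixed index
 * into malloc_sizes[].
 */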
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/* Same compile-time cache lookup as in kmalloc() above. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
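/*
 * Illustrative usage (hypothetical caller, not part of the original header):
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * behaves like kmalloc() but allocates from the given NUMA node's lists.
 */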
#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */