include/linux/slab_def.h (linux-2.6/btrfs-unstable.git)
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc is called with a size that can be established at
 * compile time.
 */
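
/*
 * Illustrative usage (not part of the original header; struct foo and len
 * are hypothetical): a constant size is resolved to a general cache at
 * compile time, while a runtime size falls through to __kmalloc():
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);   <-- constant, inlined cache lookup
 *	void *buf = kmalloc(len, GFP_KERNEL);              <-- runtime size, calls __kmalloc()
 */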
#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
/*
 * struct kmem_cache
 *
 * manages a cache.
 */
struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */
/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;	/* cache for off-slab slab descriptors */
	unsigned int slab_size;		/* size of the slab management data */

	/* constructor func */
	void (*ctor)(void *obj);
/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, while obj_offset
	 * below holds the offset to the user-visible object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif
/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
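
/*
 * Illustrative note (a sketch, based on how mm/slab.c of this era uses the
 * field): reciprocal_buffer_size holds a precomputed <linux/reciprocal_div.h>
 * constant so that mapping an object pointer back to its index within a slab
 * costs a multiply rather than a division, roughly:
 *
 *	idx = reciprocal_divide(obj - slab_base, cachep->reciprocal_buffer_size);
 *
 * where slab_base is a stand-in for the slab's base object pointer.
 */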
/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};

extern struct cache_sizes malloc_sizes[];
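
/*
 * Illustrative sketch (assuming the conventional layout of
 * <linux/kmalloc_sizes.h>): that header expands a caller-defined CACHE(x)
 * macro once per supported size, smallest first, e.g.:
 *
 *	CACHE(32)
 *	CACHE(64)
 *	CACHE(128)
 *	...
 *
 * malloc_sizes[] is generated from the same list, so the i-th CACHE()
 * expansion corresponds to malloc_sizes[i]; kmalloc() below counts
 * expansions to find the matching general cache.
 */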
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
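
/*
 * Illustrative sketch: with a constant size the compiler folds the CACHE()
 * chain above away entirely, so a call such as kmalloc(16, GFP_KERNEL)
 * reduces to roughly (the index I is hypothetical):
 *
 *	ret = kmem_cache_alloc_trace(malloc_sizes[I].cs_cachep, GFP_KERNEL, 16);
 */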
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}

#endif /* CONFIG_NUMA */
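
/*
 * Illustrative usage (a sketch; numa_node_id() comes from <linux/topology.h>):
 * place an allocation on the caller's current node:
 *
 *	void *p = kmalloc_node(256, GFP_KERNEL, numa_node_id());
 */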
#endif /* _LINUX_SLAB_DEF_H */