#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
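/*
 * Illustration (not part of the original header): a call such as
 *
 *	buf = kmalloc(128, GFP_KERNEL);
 *
 * has a compile-time-constant size, so the inline kmalloc() below picks the
 * matching general cache at build time and calls straight into
 * kmem_cache_alloc(), skipping the runtime size lookup done by __kmalloc().
 */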

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
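/*
 * Example override (as some DMA-incoherent architectures do): an arch that
 * performs DMA into kmalloc buffers can define, in its <asm/cache.h>,
 *
 *	#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
 *
 * so that every kmalloc() object starts on its own cache line and DMA never
 * shares a line with unrelated data.
 */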

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
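/*
 * Hypothetical override: an arch that faults on sub-word accesses could set
 *
 *	#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 *
 * making that the alignment floor for every object in every cache, at the
 * cost of the debug features noted above.
 */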

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
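	/*
	 * Sketch of the debug layout described above (illustrative; the
	 * exact fields present depend on the SLAB_RED_ZONE and
	 * SLAB_STORE_USER cache flags):
	 *
	 *	|redzone|user object (obj_size bytes)|redzone|last caller|
	 *	         ^-- obj_offset            total = buffer_size
	 */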

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
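/*
 * Sketch of the sizing trick referred to in the comment above (see
 * kmem_cache_init() in mm/slab.c): dynamically created caches allocate only
 *
 *	offsetof(struct kmem_cache, nodelists) +
 *			nr_node_ids * sizeof(struct kmem_list3 *)
 *
 * bytes per kmem_cache, so the tail of the [MAX_NUMNODES] array is never
 * actually instantiated for them.
 */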

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
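/*
 * How the constant-size path above folds away (illustrative; the real size
 * table comes from <linux/kmalloc_sizes.h>): for kmalloc(100, GFP_KERNEL)
 * the CACHE(x) expansion unrolls to roughly
 *
 *	if (100 <= 32) goto found; else i++;
 *	if (100 <= 64) goto found; else i++;
 *	if (100 <= 128) goto found;	(taken: i indexes the 128-byte cache)
 *
 * which the compiler folds to a direct kmem_cache_alloc_notrace() call with
 * i known at build time.
 */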

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
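/*
 * Usage sketch (hypothetical caller): place per-CPU bookkeeping on the
 * memory node backing that CPU, so later accesses stay node-local:
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 */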

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */