/*
 * tracing/kmemtrace: normalize the raw tracer event to the unified
 * tracing API.
 *
 * Source: linux-2.6 (mini2440 tree), include/linux/slab_def.h
 * blob 455f9affea9abafe24c6be97a861d1839ee770a0
 */
1 #ifndef _LINUX_SLAB_DEF_H
2 #define _LINUX_SLAB_DEF_H
4 /*
5 * Definitions unique to the original Linux SLAB allocator.
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
13 #include <linux/init.h>
14 #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15 #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16 #include <linux/compiler.h>
17 #include <trace/kmemtrace.h>
19 /* Size description struct for general caches. */
20 struct cache_sizes {
21 size_t cs_size;
22 struct kmem_cache *cs_cachep;
23 #ifdef CONFIG_ZONE_DMA
24 struct kmem_cache *cs_dmacachep;
25 #endif
27 extern struct cache_sizes malloc_sizes[];
29 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
30 void *__kmalloc(size_t size, gfp_t flags);
32 #ifdef CONFIG_KMEMTRACE
33 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34 extern size_t slab_buffer_size(struct kmem_cache *cachep);
35 #else
36 static __always_inline void *
37 kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
39 return kmem_cache_alloc(cachep, flags);
41 static inline size_t slab_buffer_size(struct kmem_cache *cachep)
43 return 0;
45 #endif
47 static __always_inline void *kmalloc(size_t size, gfp_t flags)
49 struct kmem_cache *cachep;
50 void *ret;
52 if (__builtin_constant_p(size)) {
53 int i = 0;
55 if (!size)
56 return ZERO_SIZE_PTR;
58 #define CACHE(x) \
59 if (size <= x) \
60 goto found; \
61 else \
62 i++;
63 #include <linux/kmalloc_sizes.h>
64 #undef CACHE
66 extern void __you_cannot_kmalloc_that_much(void);
67 __you_cannot_kmalloc_that_much();
69 found:
70 #ifdef CONFIG_ZONE_DMA
71 if (flags & GFP_DMA)
72 cachep = malloc_sizes[i].cs_dmacachep;
73 else
74 #endif
75 cachep = malloc_sizes[i].cs_cachep;
77 ret = kmem_cache_alloc_notrace(cachep, flags);
79 kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
80 size, slab_buffer_size(cachep), flags);
82 return ret;
84 return __kmalloc(size, flags);
87 #ifdef CONFIG_NUMA
88 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
89 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
91 #ifdef CONFIG_KMEMTRACE
92 extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
93 gfp_t flags,
94 int nodeid);
95 #else
96 static __always_inline void *
97 kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
98 gfp_t flags,
99 int nodeid)
101 return kmem_cache_alloc_node(cachep, flags, nodeid);
103 #endif
105 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
107 struct kmem_cache *cachep;
108 void *ret;
110 if (__builtin_constant_p(size)) {
111 int i = 0;
113 if (!size)
114 return ZERO_SIZE_PTR;
116 #define CACHE(x) \
117 if (size <= x) \
118 goto found; \
119 else \
120 i++;
121 #include <linux/kmalloc_sizes.h>
122 #undef CACHE
124 extern void __you_cannot_kmalloc_that_much(void);
125 __you_cannot_kmalloc_that_much();
127 found:
128 #ifdef CONFIG_ZONE_DMA
129 if (flags & GFP_DMA)
130 cachep = malloc_sizes[i].cs_dmacachep;
131 else
132 #endif
133 cachep = malloc_sizes[i].cs_cachep;
135 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
137 kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
138 ret, size, slab_buffer_size(cachep),
139 flags, node);
141 return ret;
143 return __kmalloc_node(size, flags, node);
146 #endif /* CONFIG_NUMA */
148 #endif /* _LINUX_SLAB_DEF_H */