#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_ARRAYCACHE,     /* SLAB: kmalloc size for arraycache available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
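
/*
 * The states above are ordered, so bootstrap progress can be tested with a
 * simple comparison; e.g. slab_is_available() reports whether slab_state
 * has reached UP, i.e. whether the slab allocator can be used normally.
 */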

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size);
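
/*
 * calculate_alignment() (implemented in mm/slab_common.c) computes the
 * effective object alignment for a new cache: it honours SLAB_HWCACHE_ALIGN
 * for objects large enough to benefit, never goes below ARCH_SLAB_MINALIGN,
 * and rounds the result up to a multiple of sizeof(void *).
 */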

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
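
/*
 * create_kmalloc_caches() populates the kmalloc_caches[] array during boot;
 * kmalloc_slab() then maps an allocation size to the matching cache on the
 * non-constant-size kmalloc() path.
 */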

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, unsigned long flags);
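
/*
 * create_boot_cache() initializes a statically allocated kmem_cache needed
 * before caches can be created dynamically (for example the cache backing
 * struct kmem_cache itself); create_kmalloc_cache() builds one kmalloc cache
 * and panics on failure, since there is no way to recover that early in boot.
 */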

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
                   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
                   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
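
/*
 * __kmem_cache_alias() lets the allocator satisfy a cache creation request
 * by reusing (merging with) an existing compatible cache.  Only SLUB
 * implements merging; the stub returning NULL means the other allocators
 * always create a new cache.
 */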

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
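
/*
 * The common cache creation path is expected to sanitize caller-supplied
 * flags with this mask before handing them to the allocator, e.g.
 *
 *      flags &= CACHE_CREATE_MASK;
 *
 * so that flags the compiled-in allocator does not support are dropped
 * rather than passed through.
 */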

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);
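
/*
 * These declarations back /proc/slabinfo: the common seq_file code asks the
 * allocator to fill in a struct slabinfo via get_slabinfo(), prints any
 * allocator-specific statistics through slabinfo_show_stats(), and passes
 * writes (cache tunables, where the allocator supports them) to
 * slabinfo_write().
 */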

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
                                     struct mem_cgroup *memcg)
{
        return (is_root_cache(cachep) && !memcg) ||
                                (cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
        if (!is_root_cache(s))
                atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
        if (is_root_cache(s))
                return;

        if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
                mem_cgroup_destroy_cache(s);
}
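
/*
 * The two helpers above keep a per-cache count of slab pages owned by a
 * memcg child cache.  When the last page is released (atomic_sub_and_test()
 * reaches zero) the cache is handed to mem_cgroup_destroy_cache(), which is
 * how empty caches of removed memory cgroups eventually disappear.
 */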

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return (p == s) ||
                (s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                return s->memcg_params->root_cache->name;
        return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
        return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
                                     struct mem_cgroup *memcg)
{
        return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}
#endif
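
/*
 * cache_from_obj() below is used on the free path (e.g. by kmem_cache_free())
 * to find the cache that actually owns an object: with kmemcg enabled the
 * object may belong to a per-memcg child of the cache the caller passed in.
 * Under SLAB_DEBUG_FREE it also acts as a sanity check.
 */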

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value. But we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, cachep->name, s->name);
        WARN_ON_ONCE(1);
        return s;
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct array_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};
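
/*
 * Each cache keeps one kmem_cache_node per NUMA node, reached through the
 * cache's node array, so the lists and counters above are per-node state
 * protected by list_lock unless noted otherwise.
 */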

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif  /* MM_SLAB_H */