mm/slab.h
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
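/*
 * Illustrative sketch (assumption, not part of this header): an allocator
 * typically gates early allocations on slab_state while it bootstraps,
 * roughly like the following, where boot_kmem_cache stands for a
 * hypothetical statically allocated bootstrap cache:
 *
 *	struct kmem_cache *s;
 *
 *	if (slab_state == DOWN)
 *		s = &boot_kmem_cache;
 *	else
 *		s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 *
 * The real sequencing lives in each allocator's kmem_cache_init().
 */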
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
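/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * kmalloc()-style path can map a request size to its fixed-size cache,
 * e.g.
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */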
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
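/*
 * Illustrative sketch (assumption about the typical caller, not part of
 * this header): kmem_cache_create() is expected to drop any flag outside
 * CACHE_CREATE_MASK before the cache is actually set up, e.g.
 *
 *	flags &= CACHE_CREATE_MASK;
 */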
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
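/*
 * Illustrative sketch (assumption, not part of this header): a
 * /proc/slabinfo style show path fills a struct slabinfo on the stack and
 * prints selected fields, e.g.
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu\n", cache_name(s),
 *		   sinfo.active_objs, sinfo.num_objs);
 */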
/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
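/*
 * Illustrative sketch (assumption, not necessarily the exact in-tree code):
 * the generic free-side fallback can simply walk the array and free each
 * object individually, e.g.
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 *
 * An allocator provides its own bulk paths only when it can batch the work
 * more cheaply.
 */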
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
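/*
 * Illustrative usage sketch (hypothetical caller, not part of this header);
 * root_cache is assumed to be a root (non-memcg) kmem_cache:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */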
static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}
/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}
/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}
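/*
 * Illustrative usage sketch (hypothetical caller; root_cache and kmemcg_id
 * are assumed to come from elsewhere, with a css reference or slab_mutex
 * keeping the child cache alive), falling back to the root cache when no
 * per-memcg cache exists yet:
 *
 *	struct kmem_cache *child;
 *
 *	child = cache_from_memcg_idx(root_cache, kmemcg_id);
 *	if (!child)
 *		child = root_cache;
 */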
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_kmem_charge_memcg(page, gfp, order,
					 s->memcg_params.memcg);
}
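/*
 * Illustrative sketch (assumption about the typical caller): the page
 * allocation path of an allocator charges freshly allocated slab pages to
 * the cache's memcg and backs out on failure, e.g.
 *
 *	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
 *	if (page && memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
 *		__free_pages(page, cachep->gfporder);
 *		page = NULL;
 *	}
 */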
extern void slab_init_memcg_params(struct kmem_cache *);
#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
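/*
 * Illustrative sketch (assumption about the typical caller): the free path
 * of an allocator uses this to redirect a free that was issued against the
 * root cache while the object actually lives in a per-memcg child cache:
 *
 *	void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 *	{
 *		cachep = cache_from_obj(cachep, objp);
 *		...
 *	}
 */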
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
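/*
 * Illustrative usage sketch (hypothetical caller, not part of this header);
 * nr_partial is a SLUB field, a SLAB caller would read the SLAB fields
 * instead:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */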
#endif
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */