[linux-stable.git] / mm / slab.h
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
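
/*
 * Minimal usage sketch (not part of the original declarations): walking
 * every cache on the system takes slab_mutex and follows the
 * kmem_cache::list linkage threaded through slab_caches:
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		pr_info("%s: object_size=%u size=%u\n",
 *			s->name, s->object_size, s->size);
 *	mutex_unlock(&slab_mutex);
 */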
74 unsigned long calculate_alignment(unsigned long flags,
75 unsigned long align, unsigned long size);
77 #ifndef CONFIG_SLOB
78 /* Kmalloc array related functions */
79 void setup_kmalloc_cache_index_table(void);
80 void create_kmalloc_caches(unsigned long);
82 /* Find the kmalloc slab corresponding for a certain size */
83 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
84 #endif
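
/*
 * Illustrative sketch of a kmalloc-style caller (size and flags are the
 * caller's arguments): the size is first resolved to one of the fixed
 * kmalloc caches; a zero or too-large size yields a ZERO_SIZE_PTR/NULL
 * result that must be checked before use:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */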

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
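
/*
 * Sketch of how the mask is applied, mirroring the common
 * kmem_cache_create() path: caller-supplied flags outside the legal mask
 * are dropped before the cache is set up:
 *
 *	flags &= CACHE_CREATE_MASK;
 */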

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
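
/*
 * Minimal sketch of the generic fallback, assuming an allocator without a
 * bulk fast path (s, flags, nr and p are the cache, gfp mask, object count
 * and object array): allocate one by one and undo everything on failure, so
 * the caller sees either nr objects or none:
 *
 *	size_t i;
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *		p[i] = x;
 *	}
 *	return i;
 */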

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
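
/*
 * Usage sketch, assuming the caller already holds slab_mutex as required:
 *
 *	struct kmem_cache *c;
 *
 *	lockdep_assert_held(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache %s of %s\n", c->name, root->name);
 */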

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}
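
/*
 * Illustrative sketch: a lookup of one memcg's child cache, with the caller
 * pinning the cache as described above (here by holding slab_mutex); idx is
 * assumed to be the owning memcg's kmem cache index:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	c = cache_from_memcg_idx(root_cache, idx);
 *	if (c)
 *		pr_info("memcg child of %s: %s\n", root_cache->name, c->name);
 *	mutex_unlock(&slab_mutex);
 */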

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
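
/*
 * Usage sketch of the free path, assuming a kmem_cache_free()-style caller;
 * do_free() is a hypothetical stand-in for the allocator's internal free
 * routine:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *objp)
 *	{
 *		s = cache_from_obj(s, objp);
 *		do_free(s, objp);
 *	}
 */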

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
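
/*
 * Sketch of how an allocator fast path is expected to bracket an allocation
 * with these hooks; do_alloc() is a hypothetical stand-in for the
 * allocator-specific object allocation:
 *
 *	static void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, gfpflags);
 *		if (!s)
 *			return NULL;
 *
 *		object = do_alloc(s, gfpflags);
 *		slab_post_alloc_hook(s, gfpflags, 1, &object);
 *		return object;
 *	}
 */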

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif
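
/*
 * Usage sketch for the node iterator, assuming a SLUB-configured kernel
 * (nr_partial is a SLUB-only field): sum the partial-slab counts across all
 * nodes that have a kmem_cache_node allocated:
 *
 *	unsigned long partial = 0;
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */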

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
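
/*
 * Lifecycle sketch, assuming an allocator that opts into freelist
 * randomization: the pre-computed random sequence is created once while the
 * cache is being set up and destroyed when the cache is released; with
 * CONFIG_SLAB_FREELIST_RANDOM disabled both calls are no-ops. Here count is
 * assumed to be the number of objects per slab and gfp the caller's mask:
 *
 *	int err = cache_random_seq_create(cachep, count, gfp);
 *
 *	if (err)
 *		return err;
 *
 *	...
 *
 *	cache_random_seq_destroy(cachep);
 */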

#endif /* MM_SLAB_H */