/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
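
/*
 * Example (illustrative only; "cachep" and "size" are placeholder variables):
 * early-boot code typically gates on slab_state before touching the
 * allocator, e.g.
 *
 *	if (slab_state >= UP)
 *		cachep = kmem_cache_create("example", size, 0, 0, NULL);
 *
 * slab_is_available() in slab_common.c is essentially this ">= UP" check.
 */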

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
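
/*
 * For illustration (not part of the original header): kmalloc_slab() maps a
 * request size to the cache that would back a kmalloc() of that size, e.g.
 *
 *	struct kmem_cache *s = kmalloc_slab(64, GFP_KERNEL);
 *
 * which, with a default configuration, resolves to the "kmalloc-64" cache.
 */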

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);

struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif /* CONFIG_SLOB */

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int batchcount;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
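
/*
 * Illustrative sketch (an assumption about the generic fallback, not the
 * verbatim slab_common.c code): without allocator-specific batching these
 * degrade to one per-object call per element, roughly
 *
 *	for (i = 0; i < nr; i++)
 *		kmem_cache_free(s, p[i]);
 *
 * so SLAB/SLUB only override them when they can do better in bulk.
 */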

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
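
/*
 * Example usage (illustrative only; "c" and "root" are placeholder
 * variables):
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */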

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}
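
/*
 * For example (the names below are made up): a root cache "dentry" may have
 * per-memcg children carrying suffixed names such as "dentry(42:foo.slice)";
 * cache_name() maps any of them back to the base name, so
 *
 *	pr_info("%s\n", cache_name(child));
 *
 * prints "dentry" for any child of that root cache.
 */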

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
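
/*
 * Example (illustrative only; assumes the SLUB variant of kmem_cache_node,
 * where nr_partial exists):
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */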

#endif /* !CONFIG_SLOB */

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */