mm/slab.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()      system_has_cmpxchg128()
# define try_cmpxchg_freelist           try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist   this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()      system_has_cmpxchg64()
# define try_cmpxchg_freelist           try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist   this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
        struct {
                void *freelist;
                unsigned long counter;
        };
        freelist_full_t full;
} freelist_aba_t;
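
/*
 * Illustrative sketch (not part of the upstream header; the function name and
 * its caller are hypothetical, the real users live in mm/slub.c): the pointer
 * and counter are compared and swapped as one double-width value. Even if a
 * racing free/alloc puts the same pointer value back, the bumped counter
 * makes the cmpxchg fail, which is what defeats the ABA problem noted above.
 */
#ifdef system_has_freelist_aba
static inline bool example_freelist_cas(freelist_aba_t *head,
                                        void *old_ptr, unsigned long old_cnt,
                                        void *new_ptr, unsigned long new_cnt)
{
        freelist_aba_t old = { .freelist = old_ptr, .counter = old_cnt };
        freelist_aba_t new = { .freelist = new_ptr, .counter = new_cnt };

        return try_cmpxchg_freelist(&head->full, &old.full, new.full);
}
#endif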

/* Reuses the bits in struct page */
struct slab {
        unsigned long __page_flags;

        struct kmem_cache *slab_cache;
        union {
                struct {
                        union {
                                struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
                                struct {
                                        struct slab *next;
                                        int slabs;      /* Nr of slabs left */
                                };
#endif
                        };
                        /* Double-word boundary */
                        union {
                                struct {
                                        void *freelist;         /* first free object */
                                        union {
                                                unsigned long counters;
                                                struct {
                                                        unsigned inuse:16;
                                                        unsigned objects:15;
                                                        unsigned frozen:1;
                                                };
                                        };
                                };
#ifdef system_has_freelist_aba
                                freelist_aba_t freelist_counter;
#endif
                        };
                };
                struct rcu_head rcu_head;
        };

        unsigned int __page_type;
        atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
        unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)                                              \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)       (_Generic((folio),                      \
        const struct folio *:   (const struct slab *)(folio),           \
        struct folio *:         (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)           (_Generic((s),                          \
        const struct slab *:    (const struct folio *)s,                \
        struct slab *:          (struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)            (_Generic((p),                          \
        const struct page *:    (const struct slab *)(p),               \
        struct page *:          (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
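
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream header):
 * the converters above are type-checked casts, so a slab can be handed to
 * folio- or page-based MM interfaces and converted back without any copying.
 */
static inline bool example_slab_roundtrip(struct slab *slab)
{
        struct folio *folio = slab_folio(slab);
        struct page *page = slab_page(slab);

        /* Both conversions name the same underlying memory. */
        return folio_slab(folio) == slab && page_slab(page) == slab;
}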

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
        __folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
        return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
        return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
        return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
        struct folio *folio = virt_to_folio(addr);

        if (!folio_test_slab(folio))
                return NULL;

        return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
        return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
        return PAGE_SIZE << slab_order(slab);
}
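
/*
 * Illustrative sketch (hypothetical helper): combining the accessors above to
 * go from an arbitrary kernel virtual address back to the owning cache. A
 * NULL return means the address is not backed by a slab page at all.
 */
static inline struct kmem_cache *example_addr_to_cache(const void *addr)
{
        struct slab *slab = virt_to_slab(addr);

        return slab ? slab->slab_cache : NULL;
}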

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)                  ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)        READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned int x;
};
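
/*
 * The packing itself is an implementation detail of mm/slub.c; as of this
 * version it keeps the page order in the high bits and the object count in
 * the low 16 bits, roughly:
 *
 *      order   = oo.x >> 16;
 *      objects = oo.x & ((1 << 16) - 1);
 *
 * so a single word-sized read or write yields a consistent (order, objects)
 * pair without locking.
 */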

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
        struct kmem_cache_cpu __percpu *cpu_slab;
#endif
        /* Used for retrieving partial slabs, etc. */
        slab_flags_t flags;
        unsigned long min_partial;
        unsigned int size;              /* Object size including metadata */
        unsigned int object_size;       /* Object size without metadata */
        struct reciprocal_value reciprocal_size;
        unsigned int offset;            /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        /* Number of per cpu partial objects to keep around */
        unsigned int cpu_partial;
        /* Number of per cpu partial slabs to keep around */
        unsigned int cpu_partial_slabs;
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects min;
        gfp_t allocflags;               /* gfp flags to use on each alloc */
        int refcount;                   /* Refcount for slab cache destroy */
        void (*ctor)(void *object);     /* Object constructor */
        unsigned int inuse;             /* Offset to metadata */
        unsigned int align;             /* Alignment */
        unsigned int red_left_pad;      /* Left redzone padding size */
        const char *name;               /* Name (only for display!) */
        struct list_head list;          /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;            /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        unsigned long random;
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
        struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */
#endif

        struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
                                const struct slab *slab, void *x)
{
        void *object = x - (x - slab_address(slab)) % cache->size;
        void *last_object = slab_address(slab) +
                (slab->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}
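
/*
 * Worked example (illustrative): with cache->size == 64 and a pointer 100
 * bytes into the slab, (100 % 64) == 36 is subtracted, so nearest_obj()
 * returns the start of the second object at offset 64. Pointers past the
 * last object are clamped to it, and fixup_red_left() then skips any left
 * red zone when SLAB_RED_ZONE debugging is active.
 */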

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
                                          void *addr, void *obj)
{
        return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct slab *slab, void *obj)
{
        if (is_kfence_address(obj))
                return 0;
        return __obj_to_index(cache, slab_address(slab), obj);
}
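
/*
 * Worked example (illustrative): for a cache with cache->size == 192, an
 * object starting 576 bytes into the slab maps to index 576 / 192 == 3.
 * The reciprocal divide is just a cheaper way of performing that division,
 * and KFENCE-backed objects are special-cased to index 0 because they do
 * not live inside the slab at all.
 */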

static inline int objs_per_slab(const struct kmem_cache *cache,
                                const struct slab *slab)
{
        return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
        return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
        unsigned int index;

        if (!b)
                b = &kmalloc_caches[kmalloc_type(flags, caller)];
        if (size <= 192)
                index = kmalloc_size_index[size_index_elem(size)];
        else
                index = fls(size - 1);

        return (*b)[index];
}
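
/*
 * Worked example (illustrative): kmalloc(100, GFP_KERNEL) has size <= 192, so
 * index = kmalloc_size_index[(100 - 1) / 8] = kmalloc_size_index[12], which
 * selects the kmalloc-128 bucket. A 1000-byte request takes the fls() path
 * instead: fls(999) == 10, i.e. the kmalloc-1k cache. Which bucket array is
 * consulted depends on the gfp flags (DMA, reclaimable, accounted) and, with
 * CONFIG_RANDOM_KMALLOC_CACHES, on the caller address.
 */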

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
        return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT | \
                          SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT | \
                              SLAB_KMALLOC | \
                              SLAB_NO_MERGE | \
                              SLAB_NO_USER_FLAGS)
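
/*
 * Illustrative sketch (assumption about the caller, loosely mirroring the
 * checks done in mm/slab_common.c): a cache creation path rejects flags
 * outside the permitted set and then masks the request down to what the
 * current configuration supports, e.g.
 *
 *      if (flags & ~SLAB_FLAGS_PERMITTED)
 *              return NULL;            /* reject unknown/forbidden flags */
 *      flags &= CACHE_CREATE_MASK;     /* drop flags the config can't honour */
 */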

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}
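
/*
 * Typical use (illustrative; the callee below is hypothetical): debug-only
 * paths gate themselves on one of the SLAB_DEBUG_FLAGS, e.g.
 *
 *      if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *              record_alloc_track(s, object);
 *
 * The static key keeps the check a patched-out no-op when slab_debug is not
 * enabled at all.
 */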

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
        unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
        VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
                                                        slab_page(slab));
        VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
        return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                        gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
        return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
                                  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                            void **p, int objects, struct slabobj_ext *obj_exts);
#endif

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
#endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
}
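
/*
 * Worked example (illustrative): for a plain kmalloc-64 cache with no
 * debugging, KASAN or RCU involvement, slab_ksize() returns s->size, so
 * ksize() on an object obtained with kmalloc(50, ...) reports 64 usable
 * bytes. With redzoning, poisoning or KASAN the result drops back to
 * s->object_size, because the padding beyond the object belongs to the
 * debugging machinery.
 */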

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}
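
/*
 * Illustrative sketch (hypothetical call site, loosely mirroring the alloc
 * and free paths in mm/slub.c): the two helpers decide whether an object has
 * to be zeroed, skipping caches where zeroing would destroy state that must
 * survive (constructors, RCU type-safety, poisoning), e.g.
 *
 *      if (slab_want_init_on_alloc(flags, s))
 *              memset(object, 0, s->object_size);
 *      ...
 *      if (slab_want_init_on_free(s))
 *              memset(object, 0, s->object_size);
 */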

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct slab *kp_slab;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */