/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
/*
 * Internal slab definitions
 */
#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif
/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
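
/*
 * Illustrative sketch (editorial, not part of the kernel source): a bare
 * cmpxchg on the freelist head can succeed even if the list was popped and
 * repushed in between (classic ABA). Pairing the pointer with a counter that
 * is bumped on every update makes such an interleaving fail, e.g.:
 *
 *	freelist_aba_t old, new;
 *
 *	old.freelist = slab->freelist;
 *	old.counter  = slab->counters;
 *	new.freelist = next_object;
 *	new.counter  = old.counter + 1;
 *	if (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				  &old.full, new.full))
 *		goto retry;	// someone else changed the freelist
 *
 * The real update helpers live in mm/slub.c; the snippet above only shows the
 * idea behind the paired counter.
 */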
/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif
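
/*
 * Editorial note with a hedged example: struct slab overlays struct page, so
 * every field that aliases (or must not clash with) a struct page field is
 * pinned by a compile-time offset check. A hypothetical new overlay field
 * would be wired up the same way, before the #undef above, e.g.:
 *
 *	SLAB_MATCH(private, my_new_field);	// hypothetical field
 *
 * A mismatch then fails the build via static_assert() instead of silently
 * corrupting struct page state at runtime.
 */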
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio) (_Generic((folio),				\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))
/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))
/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))
/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
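
/*
 * Usage sketch (editorial): code that has a kernel virtual address and needs
 * to talk to the rest of mm typically goes through these wrappers rather than
 * open-coded casts, e.g.:
 *
 *	struct folio *folio = virt_to_folio(addr);
 *
 *	if (folio_test_slab(folio)) {
 *		struct slab *slab = folio_slab(folio);
 *		struct kmem_cache *s = slab->slab_cache;
 *		...
 *	}
 *
 * slab_page()/page_slab() exist for callers still working in struct page
 * terms; the casts are hidden behind _Generic so const-ness is preserved.
 */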
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}
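
/*
 * Editorial sketch: the pfmemalloc state simply reuses the folio's active
 * flag, which is free for this purpose because slab folios are never on the
 * LRU. A typical allocation-side pattern (illustrative) would be:
 *
 *	if (folio_is_pfmemalloc(folio))
 *		slab_set_pfmemalloc(slab);
 *
 * and consumers such as the networking code check slab_test_pfmemalloc() on
 * an object's slab before deciding whether the memory may be used for
 * non-reserve purposes.
 */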
static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}
static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
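
/*
 * Editorial note, hedged: the allocator packs the page order and the object
 * count into the single word above so both can be read or swapped atomically.
 * The actual encoding (shift and mask, OO_SHIFT/OO_MASK) lives in mm/slub.c;
 * conceptually it is:
 *
 *	x       = (order << OO_SHIFT) | objects;
 *	order   = x >> OO_SHIFT;
 *	objects = x & OO_MASK;
 */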
/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif
void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
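
/*
 * Worked example (editorial sketch): for a cache with ->size == 128 and a
 * slab starting at base, a pointer into the middle of the third object, say
 * base + 300, is rounded down by the modulo above to base + 256, i.e. the
 * start of the object that contains it. Pointers past the last object are
 * clamped to the last object, and fixup_red_left() then skips over the left
 * red zone when SLAB_RED_ZONE debugging is active, so the returned address is
 * where the object's payload actually begins.
 */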
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
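
/*
 * Editorial sketch: obj_to_index() turns an object address into its slot
 * number without a hardware divide - reciprocal_divide() multiplies by a
 * precomputed reciprocal of ->size (see ->reciprocal_size above). With the
 * same 128-byte cache as in the previous example, an object at
 * slab_address(slab) + 256 maps to index 2. KFENCE objects short-circuit to
 * index 0 because a KFENCE allocation is the only object in its page.
 */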
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];
static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}

gfp_t kmalloc_fix_flags(gfp_t flags);
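
/*
 * Worked example (editorial, assuming the standard kmalloc cache geometry):
 * a request for 100 bytes is small enough to use the lookup table, so
 * size_index_elem(100) == 12 and kmalloc_size_index[12] selects the 128-byte
 * cache, while a request for 4000 bytes takes the fls() path and yields
 * index fls(3999) == 12, i.e. the 4096-byte cache. The caller is responsible
 * for the size > 0 and size <= KMALLOC_MAX_CACHE_SIZE checks mentioned above.
 */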
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * debugging otherwise.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
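
/*
 * Usage sketch (editorial): callers pass the specific debug feature they care
 * about, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		...	// alloc/free tracking is recorded for this cache
 *
 * Passing a non-debug flag trips the VM_WARN_ON_ONCE() above, since only the
 * flags handled by setup_slub_debug() imply that debugging as a whole is on.
 */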
#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
							slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */
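
/*
 * Editorial sketch: ->obj_exts is a pointer with flag bits folded into its
 * unused low bits, which is why the accessor masks with OBJEXTS_FLAGS_MASK
 * before casting. Given the vector, a per-object extension is then reached
 * through the object's index, along the lines of:
 *
 *	struct slabobj_ext *vec = slab_obj_exts(slab);
 *
 *	if (vec)
 *		ext = &vec[obj_to_index(s, slab, p)];
 *
 * The actual consumers (memcg charging, allocation profiling) live in
 * mm/slub.c and mm/memcontrol.c.
 */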
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts);

size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}
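
/*
 * Editorial example: for a cache created with SLAB_RED_ZONE or SLAB_POISON,
 * ksize()-style queries therefore report only ->object_size even though
 * ->size is larger - writing into the padding would corrupt the red zone or
 * poison pattern the debug checks rely on. Caches using SLAB_TYPESAFE_BY_RCU
 * or SLAB_STORE_USER are limited to ->inuse, and only a cache with none of
 * these gets the full ->size.
 */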
#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */