/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */

/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
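
/*
 * A minimal sketch of creating such a cache; "struct conn" and the cache
 * name here are hypothetical, purely for illustration:
 *
 *	conn_cachep = kmem_cache_create("conn_cache", sizeof(struct conn),
 *					0, SLAB_DESTROY_BY_RCU, NULL);
 *
 * Objects freed back to this cache may be reused immediately, but the
 * backing memory stays valid slab memory across an RCU grace period,
 * which is what makes the validation loop above safe.
 */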

#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif

#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
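
/*
 * For example (illustrative only), a zero-length request behaves like:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);   // returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))            // true; do not dereference p
 *		...
 *	kfree(p);                           // safe no-op, like kfree(NULL)
 */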

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
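
/*
 * Sketch of a typical cache life cycle, using a hypothetical
 * "struct foo" (error handling abbreviated):
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *
 *	kmem_cache_destroy(foo_cachep);	// once all objects are freed
 */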

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
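
/*
 * For instance, a cache for a hypothetical struct task_delay_info:
 *
 *	delay_cachep = KMEM_CACHE(task_delay_info, SLAB_PANIC);
 *
 * is shorthand for:
 *
 *	kmem_cache_create("task_delay_info",
 *			  sizeof(struct task_delay_info),
 *			  __alignof__(struct task_delay_info),
 *			  SLAB_PANIC, NULL);
 */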

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
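
/*
 * Sketch of growing a buffer with krealloc(); "buf" and "new_size" are
 * hypothetical. krealloc() leaves the old buffer intact on failure, so
 * the original pointer must not be overwritten until success is known:
 *
 *	new = krealloc(buf, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// buf is still valid and still owned
 *	buf = new;
 */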

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests of page size and larger to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
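
/*
 * Worked example, assuming SLUB with 4K pages (PAGE_SHIFT == 12) and
 * MAX_ORDER == 11:
 *
 *	KMALLOC_SHIFT_HIGH == 13  =>  KMALLOC_MAX_CACHE_SIZE == 8 KB
 *	KMALLOC_SHIFT_MAX  == 23  =>  KMALLOC_MAX_SIZE == 8 MB
 *	KMALLOC_MAX_ORDER  == 11
 *
 * i.e. requests up to two pages come from kmalloc slab caches and
 * anything larger falls through to the page allocator.
 */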

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
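
/*
 * Sample mappings, assuming KMALLOC_MIN_SIZE == 8 (KMALLOC_SHIFT_LOW == 3);
 * larger minimum alignments shift the small indices:
 *
 *	kmalloc_index(8)   == 3   // minimum sized cache
 *	kmalloc_index(96)  == 1   // the special 65..96 byte cache
 *	kmalloc_index(100) == 7   // rounded up to the 128 byte cache
 *	kmalloc_index(192) == 2   // the special 129..192 byte cache
 */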

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#ifdef CONFIG_SLOB
#include <linux/slob_def.h>
#endif

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}
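
/*
 * For example, in the non-SLOB case (assuming KMALLOC_MIN_SIZE <= 32):
 *
 *	kmalloc_size(1) == 96
 *	kmalloc_size(2) == 192
 *	kmalloc_size(7) == 128	// so kmalloc_size(kmalloc_index(100)) == 128
 */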

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled gets bigger than it
 * would otherwise be if it were bundled in kmem_cache: we'll need an
 * extra pointer chase. But the trade-off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags);
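
/*
 * A typical call site, with a hypothetical "struct foo":
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * In atomic context (e.g. interrupt handlers) GFP_ATOMIC must be used
 * instead, since GFP_KERNEL may sleep.
 */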

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
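
/*
 * Prefer these helpers to an open-coded "kmalloc(n * size, ...)", which
 * can overflow silently. Sketch, with hypothetical "nr" and "struct item":
 *
 *	items = kmalloc_array(nr, sizeof(struct item), GFP_KERNEL); // uninitialized
 *	items = kcalloc(nr, sizeof(struct item), GFP_KERNEL);       // zeroed
 *
 * Both return NULL if nr * sizeof(struct item) would exceed SIZE_MAX.
 */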

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
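
/*
 * Sketch of a wrapper where tracking the wrapper itself would be useless;
 * "my_alloc_helper" is hypothetical:
 *
 *	void *my_alloc_helper(size_t len)
 *	{
 *		// leak/debug reports attribute this allocation to the
 *		// caller of my_alloc_helper, not to the helper itself
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */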

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */