/*
 * KERN_SLABALLOC.H	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/slaballoc.h,v 1.8 2005/06/20 20:49:12 dillon Exp $
 */
#ifndef _SYS_SLABALLOC_H_
#define _SYS_SLABALLOC_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_STDINT_H_
#include <sys/stdint.h>
#endif
#ifndef _SYS_MALLOC_H_
#include <sys/malloc.h>
#endif
/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_OVSZ_MAGIC	0x736c6163	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20
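
/*
 * Illustrative sketch, not part of the kernel API: how the size-based
 * dispatch described in the note above could look.  The helper name
 * slab_handles() is hypothetical; requests that are exact multiples of
 * PAGE_SIZE or >= ZALLOC_ZONE_LIMIT go to the kmem subsystem instead.
 */
#if 0
static __inline int
slab_handles(unsigned long size)
{
	if (size >= ZALLOC_ZONE_LIMIT || (size & PAGE_MASK) == 0)
		return (0);		/* falls through to kmem */
	return (1);			/* slab-managed allocation */
}
#endif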
#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif
/*
 * Chunk structure for free elements
 */
typedef struct SLChunk {
    struct SLChunk *c_Next;
} SLChunk;
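
/*
 * Illustrative sketch (hypothetical helpers, not kernel API): free chunks
 * are threaded through their own storage via c_Next, so pushing or popping
 * a chunk on a cpu-local singly-linked list is a pointer swap.
 */
#if 0
static __inline void
slchunk_push(SLChunk **listp, void *obj)
{
	SLChunk *chunk = obj;

	chunk->c_Next = *listp;
	*listp = chunk;
}

static __inline void *
slchunk_pop(SLChunk **listp)
{
	SLChunk *chunk = *listp;

	if (chunk)
		*listp = chunk->c_Next;
	return (chunk);
}
#endif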
#if defined(SLAB_DEBUG)
/*
 * Only used for kernels compiled w/SLAB_DEBUG
 */
struct ZSources {
    const char *file;
    int line;
};
#endif
/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 *
 * NOTE! All fields are cpu-local except z_RChunks.  Remote cpus free
 *	 chunks using atomic ops to z_RChunks and then signal local
 *	 cpus as necessary.
 */
typedef struct SLZone {
    __int32_t	z_Magic;	/* magic number for sanity check */
    int		z_Cpu;		/* which cpu owns this zone? */
    struct globaldata *z_CpuGd;	/* which cpu owns this zone? */
    TAILQ_ENTRY(SLZone) z_Entry; /* ZoneAry[] if z_NFree!=0, else Free*Zones */
    void	*z_UNused01;
    int		z_NFree;	/* total free chunks / ualloc space in zone */
    int		z_NMax;		/* maximum free chunks */
    char	*z_BasePtr;	/* pointer to start of chunk array */
    int		z_UIndex;	/* current initial allocation index */
    int		z_UEndIndex;	/* last (first) allocation index */
    int		z_ChunkSize;	/* chunk size for validation */
    int		z_ZoneIndex;
    int		z_Flags;
    SLChunk	*z_LChunks;	/* linked list of chunks current cpu */
    SLChunk	**z_LChunksp;	/* tailp */
    SLChunk	*z_RChunks;	/* linked list of chunks remote cpu */
    int		z_RSignal;	/* signal interlock */
    int		z_RCount;	/* prevent local destruction w/inflight ipis */
#if defined(SLAB_DEBUG)
#define SLAB_DEBUG_ENTRIES	32	/* must be power of 2 */
    struct ZSources z_Sources[SLAB_DEBUG_ENTRIES];
    struct ZSources z_AltSources[SLAB_DEBUG_ENTRIES];
#endif
#if defined(INVARIANTS)
    __uint32_t	z_Bitmap[];	/* bitmap of free chunks for sanity check */
#endif
} SLZone;
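
/*
 * Illustrative sketch of the remote-free path described in the NOTE
 * above: a cpu that does not own the zone pushes the freed chunk onto
 * z_RChunks with a compare-and-swap loop, then signals the owning cpu
 * if it requested a wakeup.  Hypothetical helper; it assumes an
 * atomic_cmpset_ptr()-style CAS primitive and omits the IPI details.
 */
#if 0
static __inline void
slzone_remote_free(SLZone *z, void *obj)
{
	SLChunk *chunk = obj;
	SLChunk *rhead;

	do {
		rhead = z->z_RChunks;
		chunk->c_Next = rhead;
	} while (!atomic_cmpset_ptr((volatile void *)&z->z_RChunks,
				    rhead, chunk));
	if (z->z_RSignal) {
		/* notify the owning cpu (z->z_CpuGd), e.g. via an IPI */
	}
}
#endif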
#define SLZF_UNOTZEROD		0x0001

TAILQ_HEAD(SLZoneList, SLZone);
typedef struct SLZoneList SLZoneList;

typedef struct SLGlobalData {
    SLZoneList	ZoneAry[NZONES];	/* linked list of zones NFree > 0 */
    SLZoneList	FreeZones;		/* whole zones that have become free */
    SLZoneList	FreeOvZones;		/* oversized zones */
    int		NFreeZones;		/* free zone count */
    int		JunkIndex;
    struct malloc_type ZoneInfo;	/* stats on meta-zones allocated */
} SLGlobalData;
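
/*
 * Illustrative sketch (hypothetical helper, not the kernel's actual
 * allocator): the per-cpu fast path takes the first zone on ZoneAry[]
 * for the request's zone index and pops a cpu-local chunk, falling back
 * to a slow path when no zone or local chunk is available.
 */
#if 0
static __inline void *
slab_fastpath_alloc(SLGlobalData *slgd, int zi)
{
	SLZone *z;
	SLChunk *chunk;

	z = TAILQ_FIRST(&slgd->ZoneAry[zi]);
	if (z == NULL || (chunk = z->z_LChunks) == NULL)
		return (NULL);		/* caller falls back to slow path */
	z->z_LChunks = chunk->c_Next;
	if (z->z_LChunks == NULL)
		z->z_LChunksp = &z->z_LChunks;
	--z->z_NFree;
	return (chunk);
}
#endif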
#endif	/* _KERNEL */

#endif	/* _SYS_SLABALLOC_H_ */