/*-
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)types.h     8.3 (Berkeley) 1/5/94
 * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
 */

#ifndef _CPU_TYPES_H_
#define _CPU_TYPES_H_

#include <machine/stdint.h>

#if defined(__x86_64__)
typedef __int64_t       __segsz_t;      /* segment size */
typedef __int64_t       register_t;
typedef __uint64_t      u_register_t;
#elif defined(__i386__)
typedef __int32_t       __segsz_t;      /* segment size */
typedef __int32_t       register_t;
typedef __uint32_t      u_register_t;
#endif

typedef unsigned long   vm_offset_t;    /* address space bounded offset */
typedef unsigned long   vm_size_t;      /* address space bounded size */

typedef __uint64_t      vm_pindex_t;    /* physical page index */
typedef __int64_t       vm_ooffset_t;   /* VM object bounded offset */
typedef __uint64_t      vm_poff_t;      /* physical offset */
typedef __uint64_t      vm_paddr_t;     /* physical addr (same as vm_poff_t) */

#ifdef _KERNEL
typedef __int64_t       intfptr_t;
typedef __uint64_t      uintfptr_t;
#endif

/*
 * MMU page tables
 */
typedef __uint64_t      pml4_entry_t;
typedef __uint64_t      pdp_entry_t;
typedef __uint64_t      pd_entry_t;
typedef __uint64_t      pt_entry_t;
typedef __uint32_t      cpulock_t;      /* count and exclusive lock */

/*
 * cpumask_t - a mask representing a set of cpus and supporting routines.
 *
 * WARNING! It is recommended that this mask NOT be made variably-sized
 *          because it affects a huge number of system structures.  However,
 *          kernel code (non-module) can be optimized to not operate on the
 *          whole mask.
 */

#define CPUMASK_ELEMENTS        4       /* tested by assembly for #error */

typedef struct {
        __uint64_t      ary[4];
} cpumask_t;
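
/*
 * Layout sketch (illustrative, implied by the accessor macros below):
 * cpu N is tracked in ary[N >> 6] as bit (N & 63), so the fixed
 * four-element array bounds the mask at 256 cpus.  For example,
 * setting cpu 70 sets bit 6 of ary[1]:
 *
 *      cpumask_t m;                    -- illustrative variable
 *
 *      CPUMASK_ASSZERO(m);
 *      CPUMASK_ORBIT(m, 70);           -- m.ary[1] == 0x0000000000000040
 */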

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#define CPUMASK_INITIALIZER_ALLONES     { .ary = { (__uint64_t)-1, \
                                          (__uint64_t)-1, \
                                          (__uint64_t)-1, \
                                          (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE     { .ary = { 1, 0, 0, 0 } }

#define CPUMASK_SIMPLE(cpu)     ((__uint64_t)1 << (cpu))

#define CPUMASK_ADDR(mask, cpu)                                         \
                (((cpu) < 64) ? &(mask).ary[0] :                        \
                (((cpu) < 128) ? &(mask).ary[1] :                       \
                (((cpu) < 192) ? &(mask).ary[2] : &(mask).ary[3])))

#define BSRCPUMASK(val)         ((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
                                ((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
                                ((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
                                                bsrq((val).ary[0]))))

#define BSFCPUMASK(val)         ((val).ary[0] ? bsfq((val).ary[0]) : \
                                ((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
                                ((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
                                                192 + bsfq((val).ary[3]))))
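
/*
 * Usage sketch: BSFCPUMASK() yields the lowest set cpu number and
 * BSRCPUMASK() the highest; with only cpu 70 set, both yield 70.  As
 * with the underlying bsfq/bsrq instructions, the result is undefined
 * for an all-zero mask, so guard with a zero test first:
 *
 *      if (CPUMASK_TESTNZERO(m))       -- m is an illustrative cpumask_t
 *              lowcpu = BSFCPUMASK(m);
 */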

#define CPUMASK_CMPMASKEQ(val1, val2)   ((val1).ary[0] == (val2).ary[0] && \
                                         (val1).ary[1] == (val2).ary[1] && \
                                         (val1).ary[2] == (val2).ary[2] && \
                                         (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)  ((val1).ary[0] != (val2).ary[0] || \
                                         (val1).ary[1] != (val2).ary[1] || \
                                         (val1).ary[2] != (val2).ary[2] || \
                                         (val1).ary[3] != (val2).ary[3])

#define CPUMASK_ISUP(val)               ((val).ary[0] == 1 && \
                                         (val).ary[1] == 0 && \
                                         (val).ary[2] == 0 && \
                                         (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)           ((val).ary[0] == 0 && \
                                         (val).ary[1] == 0 && \
                                         (val).ary[2] == 0 && \
                                         (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)          ((val).ary[0] != 0 || \
                                         (val).ary[1] != 0 || \
                                         (val).ary[2] != 0 || \
                                         (val).ary[3] != 0)

#define CPUMASK_TESTBIT(val, i)         ((val).ary[((i) >> 6) & 3] & \
                                         CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_TESTMASK(val1, val2)    (((val1).ary[0] & (val2).ary[0]) || \
                                         ((val1).ary[1] & (val2).ary[1]) || \
                                         ((val1).ary[2] & (val2).ary[2]) || \
                                         ((val1).ary[3] & (val2).ary[3]))

#define CPUMASK_LOWMASK(val)            ((val).ary[0])

#define CPUMASK_ORBIT(mask, i)          ((mask).ary[((i) >> 6) & 3] |= \
                                         CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)         ((mask).ary[((i) >> 6) & 3] &= \
                                         CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)        ((mask).ary[((i) >> 6) & 3] &= \
                                         ~CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ASSZERO(mask)           do {                            \
                                        (mask).ary[0] = 0;              \
                                        (mask).ary[1] = 0;              \
                                        (mask).ary[2] = 0;              \
                                        (mask).ary[3] = 0;              \
                                        } while(0)

#define CPUMASK_ASSALLONES(mask)        do {                            \
                                        (mask).ary[0] = (__uint64_t)-1; \
                                        (mask).ary[1] = (__uint64_t)-1; \
                                        (mask).ary[2] = (__uint64_t)-1; \
                                        (mask).ary[3] = (__uint64_t)-1; \
                                        } while(0)

#define CPUMASK_ASSBIT(mask, i)         do {                            \
                                        CPUMASK_ASSZERO(mask);          \
                                        CPUMASK_ORBIT(mask, i);         \
                                        } while(0)
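
/*
 * Usage sketch (illustrative names): build a single-cpu mask and test
 * membership with the non-atomic bit operations above:
 *
 *      cpumask_t m;
 *      int cpu = 3;
 *
 *      CPUMASK_ASSBIT(m, cpu);         -- zero the mask, then set cpu's bit
 *      if (CPUMASK_TESTBIT(m, cpu))
 *              ...
 */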

#define CPUMASK_ASSBMASK(mask, i)       do {                            \
                if (i < 64) {                                           \
                        (mask).ary[0] = CPUMASK_SIMPLE(i) - 1;          \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = 0;                              \
                } else if (i < 128) {                                   \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;   \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = 0;                              \
                } else if (i < 192) {                                   \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;  \
                        (mask).ary[3] = 0;                              \
                } else {                                                \
                        (mask).ary[0] = (__uint64_t)-1;                 \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;  \
                }                                                       \
} while(0)

#define CPUMASK_ASSNBMASK(mask, i)      do {                            \
                if (i < 64) {                                           \
                        (mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);       \
                        (mask).ary[1] = (__uint64_t)-1;                 \
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else if (i < 128) {                                   \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
                        (mask).ary[2] = (__uint64_t)-1;                 \
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else if (i < 192) {                                   \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
                        (mask).ary[3] = (__uint64_t)-1;                 \
                } else {                                                \
                        (mask).ary[0] = 0;                              \
                        (mask).ary[1] = 0;                              \
                        (mask).ary[2] = 0;                              \
                        (mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
                }                                                       \
} while(0)
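
/*
 * Worked example: CPUMASK_ASSBMASK(m, 100) builds the mask of cpus
 * 0-99: ary[0] = all ones (cpus 0-63), ary[1] = CPUMASK_SIMPLE(36) - 1
 * (cpus 64-99), ary[2] = ary[3] = 0.  CPUMASK_ASSNBMASK(m, 100) builds
 * the exact complement, covering cpus 100 and up.
 */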

#define CPUMASK_ANDMASK(mask, val)      do {                            \
                                        (mask).ary[0] &= (val).ary[0];  \
                                        (mask).ary[1] &= (val).ary[1];  \
                                        (mask).ary[2] &= (val).ary[2];  \
                                        (mask).ary[3] &= (val).ary[3];  \
                                        } while(0)

#define CPUMASK_NANDMASK(mask, val)     do {                            \
                                        (mask).ary[0] &= ~(val).ary[0]; \
                                        (mask).ary[1] &= ~(val).ary[1]; \
                                        (mask).ary[2] &= ~(val).ary[2]; \
                                        (mask).ary[3] &= ~(val).ary[3]; \
                                        } while(0)

#define CPUMASK_ORMASK(mask, val)       do {                            \
                                        (mask).ary[0] |= (val).ary[0];  \
                                        (mask).ary[1] |= (val).ary[1];  \
                                        (mask).ary[2] |= (val).ary[2];  \
                                        (mask).ary[3] |= (val).ary[3];  \
                                        } while(0)

#define CPUMASK_XORMASK(mask, val)      do {                            \
                                        (mask).ary[0] ^= (val).ary[0];  \
                                        (mask).ary[1] ^= (val).ary[1];  \
                                        (mask).ary[2] ^= (val).ary[2];  \
                                        (mask).ary[3] ^= (val).ary[3];  \
                                        } while(0)

#define ATOMIC_CPUMASK_ORBIT(mask, i)                                   \
                atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],         \
                                   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)                                 \
                atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3],       \
                                     CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_TESTANDSET(mask, i)                              \
                atomic_testandset_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_TESTANDCLR(mask, i)                              \
                atomic_testandclear_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_ORMASK(mask, val) do {                           \
                atomic_set_cpumask(&(mask).ary[0], (val).ary[0]);       \
                atomic_set_cpumask(&(mask).ary[1], (val).ary[1]);       \
                atomic_set_cpumask(&(mask).ary[2], (val).ary[2]);       \
                atomic_set_cpumask(&(mask).ary[3], (val).ary[3]);       \
                } while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {                         \
                atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]);     \
                atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]);     \
                atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]);     \
                atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]);     \
                } while(0)

#define ATOMIC_CPUMASK_COPY(mask, val) do {                             \
                atomic_store_rel_cpumask(&(mask).ary[0], (val).ary[0]); \
                atomic_store_rel_cpumask(&(mask).ary[1], (val).ary[1]); \
                atomic_store_rel_cpumask(&(mask).ary[2], (val).ary[2]); \
                atomic_store_rel_cpumask(&(mask).ary[3], (val).ary[3]); \
                } while(0)
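
/*
 * Usage sketch: the per-bit atomics above operate on the single
 * 64-bit element containing the target cpu, so concurrent updates to
 * a shared mask are safe, e.g. each cpu marking itself present:
 *
 *      ATOMIC_CPUMASK_ORBIT(shared_mask, cpu);  -- illustrative names
 *
 * The mask-wide operations are four independent element-wise atomics;
 * ATOMIC_CPUMASK_COPY in particular is not atomic with respect to the
 * full 256-bit mask, only per 64-bit element.
 */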

#endif

#define CPULOCK_EXCLBIT 0               /* exclusive lock bit number */
#define CPULOCK_EXCL    0x00000001      /* exclusive lock */
#define CPULOCK_INCR    0x00000002      /* auxiliary counter add/sub */
#define CPULOCK_CNTMASK 0x7FFFFFFE
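
/*
 * Encoding sketch: bit 0 of a cpulock_t is the exclusive lock and
 * bits 1-31 hold the auxiliary counter in units of CPULOCK_INCR.  For
 * example, the value (CPULOCK_EXCL | 3 * CPULOCK_INCR) == 0x00000007
 * encodes the exclusive lock held with a count of 3, and the count is
 * recovered as (v & CPULOCK_CNTMASK) / CPULOCK_INCR.
 */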

#define PML4SIZE        sizeof(pml4_entry_t) /* for assembly files */
#define PDPSIZE         sizeof(pdp_entry_t) /* for assembly files */
#define PDESIZE         sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE         sizeof(pt_entry_t) /* for assembly files */

#endif /* !_CPU_TYPES_H_ */