/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.12 2008/06/04 04:34:54 nth Exp $
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif
/*
 * SPECIAL NOTE!  Obtaining a spinlock does not enter a critical section
 * or protect against FAST interrupts but it will prevent thread preemption.
 * Because the spinlock code path is ultra critical, we do not check for
 * LWKT reschedule requests (due to an interrupt thread not being able to
 * preempt).
 */

#ifdef SMP

extern int spin_trylock_wr_contested(globaldata_t gd, struct spinlock *mtx,
                                     int value);
extern void spin_lock_wr_contested(struct spinlock *mtx, int value);
extern void spin_lock_rd_contested(struct spinlock *mtx);

#endif
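
/*
 * Illustrative sketch only, not part of the original header: per the note
 * above, obtaining a spinlock does not enter a critical section or mask
 * FAST interrupts, so data that is also touched from an interrupt handler
 * needs a critical section (crit_enter()/crit_exit() from <sys/thread2.h>)
 * in addition to the spinlock.  The function and names below are
 * hypothetical; spin_lock_wr()/spin_unlock_wr() are defined further down.
 */
#if 0
static void
example_touch_intr_shared_data(struct spinlock *spin, int *datap)
{
        crit_enter();           /* defer interrupt handlers on this cpu */
        spin_lock_wr(spin);     /* exclude other cpus */
        ++*datap;
        spin_unlock_wr(spin);
        crit_exit();
}
#endif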
#ifdef SMP

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock_wr(struct spinlock *mtx)
{
        globaldata_t gd = mycpu;
        int value;

        ++gd->gd_spinlocks_wr;
        if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0)
                return (spin_trylock_wr_contested(gd, mtx, value));
        return (TRUE);
}

#else

static __inline boolean_t
spin_trylock_wr(struct spinlock *mtx)
{
        globaldata_t gd = mycpu;

        ++gd->gd_spinlocks_wr;
        return (TRUE);
}

#endif
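
/*
 * Illustrative sketch only, not part of the original header: typical use
 * of spin_trylock_wr() on a path that must not spin.  Names are
 * hypothetical; spin_unlock_wr() is defined further down.
 */
#if 0
static int
example_try_update(struct spinlock *spin, int *counterp)
{
        if (spin_trylock_wr(spin) == FALSE)
                return (0);             /* busy, caller retries later */
        ++*counterp;
        spin_unlock_wr(spin);
        return (1);
}
#endif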
/*
 * Obtain an exclusive spinlock and return.  Shortcut the case where the only
 * cached read lock was from our own cpu (it can just be cleared).
 */
static __inline void
spin_lock_wr_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
        int value;
#endif

        ++gd->gd_spinlocks_wr;
#ifdef SMP
        if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0) {
                value &= ~gd->gd_cpumask;
                if (value)
                        spin_lock_wr_contested(mtx, value);
        }
#endif
}

static __inline void
spin_lock_wr(struct spinlock *mtx)
{
        spin_lock_wr_quick(mycpu, mtx);
}
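
/*
 * Illustrative sketch only, not part of the original header: the normal
 * exclusive lock/unlock pairing around a short critical region.  The
 * structure and names are hypothetical; spin_init() and spin_unlock_wr()
 * are defined further down.
 */
#if 0
struct example_counter {
        struct spinlock ec_spin;        /* protects ec_count */
        int             ec_count;
};

static void
example_counter_init(struct example_counter *ec)
{
        spin_init(&ec->ec_spin);
        ec->ec_count = 0;
}

static void
example_counter_bump(struct example_counter *ec)
{
        spin_lock_wr(&ec->ec_spin);
        ++ec->ec_count;
        spin_unlock_wr(&ec->ec_spin);
}
#endif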
#if 0

/*
 * Upgrade a shared spinlock to exclusive.  Return TRUE if we were
 * able to upgrade without another exclusive holder getting in before
 * us, FALSE otherwise.
 */
static __inline int
spin_lock_upgrade(struct spinlock *mtx)
{
        globaldata_t gd = mycpu;
#ifdef SMP
        int value;
#endif

        ++gd->gd_spinlocks_wr;
#ifdef SMP
        value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE);
        cpu_sfence();
#endif
        gd->gd_spinlock_rd = NULL;
#ifdef SMP
        value &= ~gd->gd_cpumask;
        if (value) {
                spin_lock_wr_contested(mtx, value);
                if (value & SPINLOCK_EXCLUSIVE)
                        return (FALSE);
                XXX regain original shared lock?
        }
        return (TRUE);
#endif
}

#endif
/*
 * Obtain a shared spinlock and return.  This is a critical code path.
 *
 * The vast majority of the overhead is in the cpu_mfence() (5ns vs 1ns for
 * the entire rest of the procedure).  Unfortunately we have to ensure that
 * the spinlock pointer is written out before we check the cpumask to
 * interlock against an exclusive spinlock that clears the cpumask and then
 * checks the spinlock pointer.
 *
 * But what is EXTREMELY important here is that we do not have to perform
 * a locked bus cycle on the spinlock itself if the shared bit for our cpu
 * is already found to be set.  We only need the mfence, and the mfence is
 * local to the cpu and never conflicts with other cpus.
 *
 * This means that multiple parallel shared accessors (e.g. filedescriptor
 * table lookups, namecache lookups) run at full speed and incur NO cache
 * contention at all.  It is the difference between 10ns and 40-100ns.
 */
static __inline void
spin_lock_rd_quick(globaldata_t gd, struct spinlock *mtx)
{
        gd->gd_spinlock_rd = mtx;
#ifdef SMP
        cpu_mfence();
        if ((mtx->lock & gd->gd_cpumask) == 0)
                spin_lock_rd_contested(mtx);
#endif
}

static __inline void
spin_lock_rd(struct spinlock *mtx)
{
        spin_lock_rd_quick(mycpu, mtx);
}
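
/*
 * Illustrative sketch only, not part of the original header: a read-mostly
 * lookup on the shared fast path.  Multiple cpus may run this concurrently;
 * once a cpu's shared bit is cached in the lock only the cpu_mfence() is
 * paid, no locked bus cycle.  Reuses the hypothetical example_counter from
 * the sketch above; spin_unlock_rd() is defined further down.
 */
#if 0
static int
example_counter_read(struct example_counter *ec)
{
        int v;

        spin_lock_rd(&ec->ec_spin);
        v = ec->ec_count;
        spin_unlock_rd(&ec->ec_spin);
        return (v);
}
#endif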
/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_wr_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
        mtx->lock = 0;
#endif
        KKASSERT(gd->gd_spinlocks_wr > 0);
        --gd->gd_spinlocks_wr;
}

static __inline void
spin_unlock_wr(struct spinlock *mtx)
{
        spin_unlock_wr_quick(mycpu, mtx);
}

/*
 * Release a shared spinlock.  We leave the shared bit set in the spinlock
 * as a cache and simply clear the spinlock pointer for the cpu.  This
 * fast-paths another shared lock later at the cost of an exclusive lock
 * having to check per-cpu spinlock pointers to determine when there are no
 * shared holders remaining.
 */
static __inline void
spin_unlock_rd_quick(globaldata_t gd, struct spinlock *mtx)
{
        KKASSERT(gd->gd_spinlock_rd == mtx);
        gd->gd_spinlock_rd = NULL;
}

static __inline void
spin_unlock_rd(struct spinlock *mtx)
{
        spin_unlock_rd_quick(mycpu, mtx);
}

static __inline void
spin_init(struct spinlock *mtx)
{
        mtx->lock = 0;
}

static __inline void
spin_uninit(struct spinlock *mtx)
{
        /* unused */
}

#endif  /* _KERNEL */
#endif  /* _SYS_SPINLOCK2_H_ */