kernel/disp/disp_lock.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/lockstat.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
/*
 * We check CPU_ON_INTR(CPU) when exiting a disp lock, rather than when
 * entering it, for a purely pragmatic reason: when exiting a disp lock
 * we know that we must be at PIL 10, and thus not preemptible; therefore
 * we can safely load the CPU pointer without worrying about it changing.
 */
static void
disp_onintr_panic(void)
{
	panic("dispatcher invoked from high-level interrupt handler");
}
/* ARGSUSED */
void
disp_lock_init(disp_lock_t *lp, char *name)
{
	DISP_LOCK_INIT(lp);
}
/* ARGSUSED */
void
disp_lock_destroy(disp_lock_t *lp)
{
	DISP_LOCK_DESTROY(lp);
}
void
disp_lock_enter_high(disp_lock_t *lp)
{
	lock_set(lp);
}
void
disp_lock_exit_high(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear(lp);
}
void
disp_lock_enter(disp_lock_t *lp)
{
	lock_set_spl(lp, ipltospl(DISP_LEVEL), &curthread->t_oldspl);
}
void
disp_lock_exit(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	if (CPU->cpu_kprunrun) {
		lock_clear_splx(lp, curthread->t_oldspl);
		kpreempt(KPREEMPT_SYNC);
	} else {
		lock_clear_splx(lp, curthread->t_oldspl);
	}
}
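
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * brackets a short dispatcher-state update with disp_lock_enter() and
 * disp_lock_exit().  The lock "example_lock" and the function
 * "example_disp_update" are hypothetical names invented for this example.
 */
#if 0	/* illustration only */
static disp_lock_t	example_lock;

static void
example_disp_update(void)
{
	disp_lock_enter(&example_lock);		/* raises spl to DISP_LEVEL */
	/* ... update state protected by example_lock ... */
	disp_lock_exit(&example_lock);		/* restores spl; may kpreempt() if preemption is pending */
}
#endif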
void
disp_lock_exit_nopreempt(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear_splx(lp, curthread->t_oldspl);
}
/*
 * Thread_lock() - get the correct dispatcher lock for the thread.
 */
void
thread_lock(kthread_id_t t)
{
	int s = splhigh();

	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp) {
				curthread->t_oldspl = (ushort_t)s;
				return;
			}
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_SPIN);
			/*
			 * Lower spl and spin on lock with non-atomic load
			 * to avoid cache activity.  Spin until the lock
			 * becomes available or spontaneously changes.
			 */
			splx(s);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr) {
					curthread->t_oldspl = splhigh();
					return;
				}
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_SPIN,
			    lp, spin_time);
			s = splhigh();
		}
	}
}
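
/*
 * Illustrative analogue, not part of the original file: thread_lock() must
 * re-check t->t_lockp after lock_try() because another CPU may retarget the
 * pointer (for example via thread_transition() below) between the load and
 * the acquisition.  The hedged userland sketch below shows the same
 * "acquire the lock the pointer currently names, then revalidate the
 * pointer" pattern with C11 atomics; every name in it (guard_t, obj_t,
 * obj_lock) is hypothetical.
 */
#if 0	/* illustration only; assumes <stdatomic.h> */
typedef struct guard {
	atomic_flag	g_held;
} guard_t;

typedef struct obj {
	_Atomic(guard_t *)	o_guard;	/* may be retargeted concurrently */
} obj_t;

static guard_t *
obj_lock(obj_t *op)
{
	for (;;) {
		guard_t *gp = atomic_load(&op->o_guard);

		if (!atomic_flag_test_and_set(&gp->g_held)) {
			if (gp == atomic_load(&op->o_guard))
				return (gp);		/* pointer still current: we hold the right lock */
			atomic_flag_clear(&gp->g_held);	/* pointer moved under us: drop and retry */
		}
	}
}
#endif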
/*
 * Thread_lock_high() - get the correct dispatcher lock for the thread.
 *	This version is called when already at high spl.
 */
void
thread_lock_high(kthread_id_t t)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp)
				return;
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_HIGH_SPIN);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr)
					return;
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_HIGH_SPIN,
			    lp, spin_time);
		}
	}
}
/*
 * Called by the THREAD_TRANSITION macro to change the thread state to
 * the intermediate state-in-transition state.
 */
void
thread_transition(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &transition_lock);

	lp = t->t_lockp;
	t->t_lockp = &transition_lock;
	disp_lock_exit_high(lp);
}
/*
 * Put thread in stop state, and set the lock pointer to the stop_lock.
 * This effectively drops the lock on the thread, since the stop_lock
 * isn't held.
 * Eventually, stop_lock could be hashed if there is too much contention.
 */
void
thread_stop(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &stop_lock);

	lp = t->t_lockp;
	t->t_state = TS_STOPPED;
	/*
	 * Ensure that t_state reaches global visibility before t_lockp.
	 */
	membar_producer();
	t->t_lockp = &stop_lock;
	disp_lock_exit(lp);
}
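
/*
 * Illustrative analogue, not part of the original file: membar_producer()
 * above is a store-store barrier, so the TS_STOPPED store cannot be
 * reordered after the t_lockp store; an observer that orders its own loads
 * and sees the new lock pointer will also see the new state.  The hedged
 * userland sketch below expresses the same publication idea with C11
 * release/acquire ordering; the names g_state, g_lockp, publish_stopped and
 * observe_state are hypothetical.
 */
#if 0	/* illustration only; assumes <stdatomic.h> */
static _Atomic int	g_state;
static _Atomic(void *)	g_lockp;

static void
publish_stopped(int new_state, void *new_lockp)
{
	atomic_store_explicit(&g_state, new_state, memory_order_relaxed);
	/* the release store orders the state store before the pointer store */
	atomic_store_explicit(&g_lockp, new_lockp, memory_order_release);
}

static int
observe_state(void)
{
	/* acquire load pairs with the release store of the pointer */
	void *lp = atomic_load_explicit(&g_lockp, memory_order_acquire);

	(void) lp;	/* if this read saw the new pointer, the state store is visible too */
	return (atomic_load_explicit(&g_state, memory_order_relaxed));
}
#endif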