/*
 * 5285 pass in cpu_pause_func via pause_cpus
 * illumos-gate: usr/src/uts/sun4/os/mp_states.c
 */
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */
25 #include <sys/systm.h>
26 #include <sys/membar.h>
27 #include <sys/machsystm.h>
28 #include <sys/x_call.h>
29 #include <sys/platform_module.h>
30 #include <sys/cpuvar.h>
31 #include <sys/cpu_module.h>
32 #include <sys/cmp.h>
33 #include <sys/dumphdr.h>
35 #include <sys/cpu_sgnblk_defs.h>
37 static cpuset_t cpu_idle_set;
38 static kmutex_t cpu_idle_lock;
39 typedef const char *fn_t;
42 * flags to determine if the PROM routines
43 * should be used to idle/resume/stop cpus
45 static int kern_idle[NCPU]; /* kernel's idle loop */
46 static int cpu_are_paused;
47 extern void debug_flush_windows();
50 * Initialize the idlestop mutex
52 void
53 idlestop_init(void)
55 mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
58 static void
59 cpu_idle_self(void)
61 uint_t s;
62 label_t save;
64 s = spl8();
65 debug_flush_windows();
67 CPU->cpu_m.in_prom = 1;
68 membar_stld();
70 save = curthread->t_pcb;
71 (void) setjmp(&curthread->t_pcb);
73 kern_idle[CPU->cpu_id] = 1;
74 while (kern_idle[CPU->cpu_id])
75 dumpsys_helper_nw();
77 CPU->cpu_m.in_prom = 0;
78 membar_stld();
80 curthread->t_pcb = save;
81 splx(s);
84 void
85 idle_other_cpus(void)
87 int i, cpuid, ntries;
88 int failed = 0;
90 if (ncpus == 1)
91 return;
93 mutex_enter(&cpu_idle_lock);
95 cpuid = CPU->cpu_id;
96 ASSERT(cpuid < NCPU);
98 cpu_idle_set = cpu_ready_set;
99 CPUSET_DEL(cpu_idle_set, cpuid);
101 if (CPUSET_ISNULL(cpu_idle_set))
102 return;
104 xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
105 (uint64_t)cpu_idle_self, NULL);
107 for (i = 0; i < NCPU; i++) {
108 if (!CPU_IN_SET(cpu_idle_set, i))
109 continue;
111 ntries = 0x10000;
112 while (!cpu[i]->cpu_m.in_prom && ntries) {
113 DELAY(50);
114 ntries--;
118 * A cpu failing to idle is an error condition, since
119 * we can't be sure anymore of its state.
121 if (!cpu[i]->cpu_m.in_prom) {
122 cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
123 failed++;
127 if (failed) {
128 mutex_exit(&cpu_idle_lock);
129 cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
133 void
134 resume_other_cpus(void)
136 int i, ntries;
137 int cpuid = CPU->cpu_id;
138 boolean_t failed = B_FALSE;
140 if (ncpus == 1)
141 return;
143 ASSERT(cpuid < NCPU);
144 ASSERT(MUTEX_HELD(&cpu_idle_lock));
146 for (i = 0; i < NCPU; i++) {
147 if (!CPU_IN_SET(cpu_idle_set, i))
148 continue;
150 kern_idle[i] = 0;
151 membar_stld();
154 for (i = 0; i < NCPU; i++) {
155 if (!CPU_IN_SET(cpu_idle_set, i))
156 continue;
158 ntries = 0x10000;
159 while (cpu[i]->cpu_m.in_prom && ntries) {
160 DELAY(50);
161 ntries--;
165 * A cpu failing to resume is an error condition, since
166 * intrs may have been directed there.
168 if (cpu[i]->cpu_m.in_prom) {
169 cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
170 continue;
172 CPUSET_DEL(cpu_idle_set, i);
175 failed = !CPUSET_ISNULL(cpu_idle_set);
177 mutex_exit(&cpu_idle_lock);
180 * Non-zero if a cpu failed to resume
182 if (failed)
183 cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");
188 * Stop all other cpu's before halting or rebooting. We pause the cpu's
189 * instead of sending a cross call.
191 void
192 stop_other_cpus(void)
194 mutex_enter(&cpu_lock);
195 if (cpu_are_paused) {
196 mutex_exit(&cpu_lock);
197 return;
200 if (ncpus > 1)
201 intr_redist_all_cpus_shutdown();
203 pause_cpus(NULL, NULL);
204 cpu_are_paused = 1;
206 mutex_exit(&cpu_lock);
209 int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;
211 void
212 mp_cpu_quiesce(cpu_t *cp0)
215 volatile cpu_t *cp = (volatile cpu_t *) cp0;
216 int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
217 int cpuid = cp->cpu_id;
218 int found_intr = 1;
219 static fn_t f = "mp_cpu_quiesce";
221 ASSERT(CPU->cpu_id != cpuid);
222 ASSERT(MUTEX_HELD(&cpu_lock));
223 ASSERT(cp->cpu_flags & CPU_QUIESCED);
227 * Declare CPU as no longer being READY to process interrupts and
228 * wait for them to stop. A CPU that is not READY can no longer
229 * participate in x-calls or x-traps.
231 cp->cpu_flags &= ~CPU_READY;
232 CPUSET_DEL(cpu_ready_set, cpuid);
233 membar_sync();
235 for (i = 0; i < sanity_limit; i++) {
236 if (cp->cpu_intr_actv == 0 &&
237 (cp->cpu_thread == cp->cpu_idle_thread ||
238 cp->cpu_thread == cp->cpu_startup_thread)) {
239 found_intr = 0;
240 break;
242 DELAY(1);
245 if (found_intr) {
247 if (cp->cpu_intr_actv) {
248 cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
249 } else if (cp->cpu_thread != cp->cpu_idle_thread &&
250 cp->cpu_thread != cp->cpu_startup_thread) {
251 cmn_err(CE_PANIC, "%s: CPU %d is not quiesced",
252 f, cpuid);
259 * Start CPU on user request.
261 /* ARGSUSED */
263 mp_cpu_start(struct cpu *cp)
265 ASSERT(MUTEX_HELD(&cpu_lock));
267 * Platforms that use CPU signatures require the signature
268 * block update to indicate that this CPU is in the OS now.
270 CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);
272 cmp_error_resteer(cp->cpu_id);
274 return (0); /* nothing special to do on this arch */
278 * Stop CPU on user request.
280 /* ARGSUSED */
282 mp_cpu_stop(struct cpu *cp)
284 ASSERT(MUTEX_HELD(&cpu_lock));
286 cmp_error_resteer(cp->cpu_id);
289 * Platforms that use CPU signatures require the signature
290 * block update to indicate that this CPU is offlined now.
292 CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);
293 return (0); /* nothing special to do on this arch */
297 * Power on CPU.
300 mp_cpu_poweron(struct cpu *cp)
302 ASSERT(MUTEX_HELD(&cpu_lock));
303 if (&plat_cpu_poweron)
304 return (plat_cpu_poweron(cp)); /* platform-dependent hook */
306 return (ENOTSUP);
310 * Power off CPU.
313 mp_cpu_poweroff(struct cpu *cp)
315 ASSERT(MUTEX_HELD(&cpu_lock));
316 if (&plat_cpu_poweroff)
317 return (plat_cpu_poweroff(cp)); /* platform-dependent hook */
319 return (ENOTSUP);
/*
 * Architecture hook: take cp into the faulted state (delegates to the
 * common cpu_faulted_enter()).
 */
void
mp_cpu_faulted_enter(struct cpu *cp)
{
	cpu_faulted_enter(cp);
}
/*
 * Architecture hook: bring cp out of the faulted state (delegates to
 * the common cpu_faulted_exit()).
 */
void
mp_cpu_faulted_exit(struct cpu *cp)
{
	cpu_faulted_exit(cp);
}