4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/systm.h>
26 #include <sys/membar.h>
27 #include <sys/machsystm.h>
28 #include <sys/x_call.h>
29 #include <sys/platform_module.h>
30 #include <sys/cpuvar.h>
31 #include <sys/cpu_module.h>
33 #include <sys/dumphdr.h>
35 #include <sys/cpu_sgnblk_defs.h>
37 static cpuset_t cpu_idle_set
;
38 static kmutex_t cpu_idle_lock
;
39 typedef const char *fn_t
;
42 * flags to determine if the PROM routines
43 * should be used to idle/resume/stop cpus
45 static int kern_idle
[NCPU
]; /* kernel's idle loop */
46 static int cpu_are_paused
;
47 extern void debug_flush_windows();
50 * Initialize the idlestop mutex
/*
 * NOTE(review): the function header for this initializer is not visible in
 * this chunk.  The call below sets up cpu_idle_lock as a spin mutex whose
 * acquisition raises to PIL 15, so it can serialize idle/resume operations
 * against high-level activity.
 */
55 mutex_init(&cpu_idle_lock
, NULL
, MUTEX_SPIN
, (void *)ipltospl(PIL_15
));
/*
 * NOTE(review): the function header is not visible in this chunk; these
 * statements are the body of the per-CPU idle routine that each targeted
 * CPU executes when cross-called from idle_other_cpus() below.
 */
65 debug_flush_windows();
/*
 * in_prom is polled by idle_other_cpus()/resume_other_cpus() to tell
 * when this CPU has parked itself and when it has come back.
 */
67 CPU
->cpu_m
.in_prom
= 1;
/* Save the current pcb; setjmp overwrites it for the duration of the idle. */
70 save
= curthread
->t_pcb
;
71 (void) setjmp(&curthread
->t_pcb
);
/* Spin here until resume_other_cpus() clears this CPU's kern_idle flag. */
73 kern_idle
[CPU
->cpu_id
] = 1;
74 while (kern_idle
[CPU
->cpu_id
])
/* Loop body not visible in this chunk (original lines elided). */
77 CPU
->cpu_m
.in_prom
= 0;
/* Restore the saved pcb before returning to normal execution. */
80 curthread
->t_pcb
= save
;
/*
 * NOTE(review): function header not visible in this chunk; this is the body
 * of idle_other_cpus(), which cross-calls every other ready CPU into the
 * idle routine above and waits for each to report in_prom != 0.
 */
93 mutex_enter(&cpu_idle_lock
);
/* Target every ready CPU except the caller. */
98 cpu_idle_set
= cpu_ready_set
;
99 CPUSET_DEL(cpu_idle_set
, cpuid
);
/* Nothing to do if this is the only ready CPU (taken-branch lines elided). */
101 if (CPUSET_ISNULL(cpu_idle_set
))
104 xt_some(cpu_idle_set
, (xcfunc_t
*)idle_stop_xcall
,
105 (uint64_t)cpu_idle_self
, NULL
);
/* Wait for each targeted CPU to acknowledge by setting cpu_m.in_prom. */
107 for (i
= 0; i
< NCPU
; i
++) {
108 if (!CPU_IN_SET(cpu_idle_set
, i
))
/*
 * Bounded poll; ntries initialization and the loop body (presumably a
 * delay/decrement) are not visible in this chunk.
 */
112 while (!cpu
[i
]->cpu_m
.in_prom
&& ntries
) {
118 * A cpu failing to idle is an error condition, since
119 * we can't be sure anymore of its state.
121 if (!cpu
[i
]->cpu_m
.in_prom
) {
122 cmn_err(CE_WARN
, "cpuid 0x%x failed to idle", i
);
/* On failure: drop the lock, then panic (intervening lines elided). */
128 mutex_exit(&cpu_idle_lock
);
129 cmn_err(CE_PANIC
, "idle_other_cpus: not all cpus idled");
/*
 * Resume the CPUs previously parked by idle_other_cpus().  The caller must
 * still hold cpu_idle_lock (asserted below); this routine releases it.
 * NOTE(review): the return type line and several interior lines are not
 * visible in this chunk.
 */
134 resume_other_cpus(void)
137 int cpuid
= CPU
->cpu_id
;
138 boolean_t failed
= B_FALSE
;
143 ASSERT(cpuid
< NCPU
);
144 ASSERT(MUTEX_HELD(&cpu_idle_lock
));
/* First pass over the idled set (loop body elided in this chunk). */
146 for (i
= 0; i
< NCPU
; i
++) {
147 if (!CPU_IN_SET(cpu_idle_set
, i
))
/* Second pass: wait for each CPU to clear in_prom, i.e. to resume. */
154 for (i
= 0; i
< NCPU
; i
++) {
155 if (!CPU_IN_SET(cpu_idle_set
, i
))
/* Bounded poll; ntries setup and loop body not visible in this chunk. */
159 while (cpu
[i
]->cpu_m
.in_prom
&& ntries
) {
165 * A cpu failing to resume is an error condition, since
166 * intrs may have been directed there.
168 if (cpu
[i
]->cpu_m
.in_prom
) {
169 cmn_err(CE_WARN
, "cpuid 0x%x failed to resume", i
);
/* Successfully resumed CPUs are removed from the tracking set. */
172 CPUSET_DEL(cpu_idle_set
, i
);
/* Any CPU left in the set failed to resume. */
175 failed
= !CPUSET_ISNULL(cpu_idle_set
);
177 mutex_exit(&cpu_idle_lock
);
180 * Non-zero if a cpu failed to resume
183 cmn_err(CE_PANIC
, "resume_other_cpus: not all cpus resumed");
188 * Stop all other cpu's before halting or rebooting. We pause the cpu's
189 * instead of sending a cross call.
/*
 * NOTE(review): return type line and parts of the body (including where
 * cpu_are_paused is set after pausing) are not visible in this chunk.
 */
192 stop_other_cpus(void)
194 mutex_enter(&cpu_lock
);
/* Already paused: nothing to do (early-return lines elided). */
195 if (cpu_are_paused
) {
196 mutex_exit(&cpu_lock
);
/* Move interrupts away from the CPUs about to be paused. */
201 intr_redist_all_cpus_shutdown();
203 pause_cpus(NULL
, NULL
);
206 mutex_exit(&cpu_lock
);
/* Upper bound, in microseconds, on the mp_cpu_quiesce() wait: 60 seconds. */
int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;
/*
 * Quiesce CPU cp0: mark it not READY (so it leaves the x-call/x-trap
 * population) and wait, up to the sanity limit, for its interrupt activity
 * to drain and for it to settle into its idle or startup thread.
 * NOTE(review): the return type line, the poll-loop delay body, and the
 * success/return paths are not visible in this chunk.
 */
212 mp_cpu_quiesce(cpu_t
*cp0
)
/* volatile: the fields below are changed by the target CPU while we poll. */
215 volatile cpu_t
*cp
= (volatile cpu_t
*) cp0
;
216 int i
, sanity_limit
= cpu_quiesce_microsecond_sanity_limit
;
217 int cpuid
= cp
->cpu_id
;
219 static fn_t f
= "mp_cpu_quiesce";
/* A CPU cannot quiesce itself. */
221 ASSERT(CPU
->cpu_id
!= cpuid
);
222 ASSERT(MUTEX_HELD(&cpu_lock
));
223 ASSERT(cp
->cpu_flags
& CPU_QUIESCED
);
227 * Declare CPU as no longer being READY to process interrupts and
228 * wait for them to stop. A CPU that is not READY can no longer
229 * participate in x-calls or x-traps.
231 cp
->cpu_flags
&= ~CPU_READY
;
232 CPUSET_DEL(cpu_ready_set
, cpuid
);
/* Poll (presumably ~1us per iteration — elided) up to the sanity limit. */
235 for (i
= 0; i
< sanity_limit
; i
++) {
236 if (cp
->cpu_intr_actv
== 0 &&
237 (cp
->cpu_thread
== cp
->cpu_idle_thread
||
238 cp
->cpu_thread
== cp
->cpu_startup_thread
)) {
/* Timed out: diagnose which condition failed and panic. */
247 if (cp
->cpu_intr_actv
) {
248 cmn_err(CE_PANIC
, "%s: cpu_intr_actv != 0", f
);
249 } else if (cp
->cpu_thread
!= cp
->cpu_idle_thread
&&
250 cp
->cpu_thread
!= cp
->cpu_startup_thread
) {
251 cmn_err(CE_PANIC
, "%s: CPU %d is not quiesced",
259 * Start CPU on user request.
/*
 * NOTE(review): the return type line and braces are not visible in this
 * chunk; the trailing return (0) implies an int return.
 */
263 mp_cpu_start(struct cpu
*cp
)
265 ASSERT(MUTEX_HELD(&cpu_lock
));
267 * Platforms that use CPU signatures require the signature
268 * block update to indicate that this CPU is in the OS now.
270 CPU_SIGNATURE(OS_SIG
, SIGST_RUN
, SIGSUBST_NULL
, cp
->cpu_id
);
/* Re-steer error reporting for this core (chip multiprocessing support). */
272 cmp_error_resteer(cp
->cpu_id
);
274 return (0); /* nothing special to do on this arch */
278 * Stop CPU on user request.
/*
 * NOTE(review): mirror of mp_cpu_start above — signature marked OFFLINE
 * instead of RUN.  Return type line and braces not visible in this chunk.
 */
282 mp_cpu_stop(struct cpu
*cp
)
284 ASSERT(MUTEX_HELD(&cpu_lock
));
/* Re-steer error reporting away from the CPU being stopped. */
286 cmp_error_resteer(cp
->cpu_id
);
289 * Platforms that use CPU signatures require the signature
290 * block update to indicate that this CPU is offlined now.
292 CPU_SIGNATURE(OS_SIG
, SIGST_OFFLINE
, SIGSUBST_NULL
, cp
->cpu_id
);
293 return (0); /* nothing special to do on this arch */
/*
 * Power on a CPU via the platform hook, if the platform provides one.
 * The address-of test checks whether the weak plat_cpu_poweron symbol is
 * present.  NOTE(review): the no-hook fallthrough return is not visible
 * in this chunk.
 */
300 mp_cpu_poweron(struct cpu
*cp
)
302 ASSERT(MUTEX_HELD(&cpu_lock
));
303 if (&plat_cpu_poweron
)
304 return (plat_cpu_poweron(cp
)); /* platform-dependent hook */
/*
 * Power off a CPU via the platform hook, if present (weak-symbol test,
 * mirroring mp_cpu_poweron above).  NOTE(review): the no-hook fallthrough
 * return is not visible in this chunk.
 */
313 mp_cpu_poweroff(struct cpu
*cp
)
315 ASSERT(MUTEX_HELD(&cpu_lock
));
316 if (&plat_cpu_poweroff
)
317 return (plat_cpu_poweroff(cp
)); /* platform-dependent hook */
/*
 * Mark a CPU as entering the faulted state; thin wrapper over the common
 * cpu_faulted_enter().  NOTE(review): return type line and braces are not
 * visible in this chunk.
 */
323 mp_cpu_faulted_enter(struct cpu
*cp
)
325 cpu_faulted_enter(cp
);
/*
 * Mark a CPU as leaving the faulted state; thin wrapper over the common
 * cpu_faulted_exit().  NOTE(review): the definition runs past the end of
 * this chunk.
 */
329 mp_cpu_faulted_exit(struct cpu
*cp
)
331 cpu_faulted_exit(cp
);