4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
30 * CPU support routines for DR
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <sys/errno.h>
37 #include <sys/dditypes.h>
38 #include <sys/devops.h>
39 #include <sys/modctl.h>
43 #include <sys/sunddi.h>
44 #include <sys/sunndi.h>
45 #include <sys/ddi_impldefs.h>
46 #include <sys/ndi_impldefs.h>
49 #include <sys/processor.h>
50 #include <sys/cpuvar.h>
51 #include <sys/mem_config.h>
52 #include <sys/promif.h>
53 #include <sys/x_call.h>
54 #include <sys/cpu_sgnblk_defs.h>
55 #include <sys/membar.h>
56 #include <sys/stack.h>
57 #include <sys/sysmacros.h>
58 #include <sys/machsystm.h>
59 #include <sys/spitregs.h>
61 #include <sys/archsystm.h>
62 #include <vm/hat_sfmmu.h>
65 #include <sys/x_call.h>
66 #include <sys/cpu_module.h>
67 #include <sys/cheetahregs.h>
69 #include <sys/autoconf.h>
70 #include <sys/cmn_err.h>
72 #include <sys/sbdpriv.h>
/*
 * sbd_cpu_set_prop:
 * Cache the CPU clock speed (MHz) and ecache size (MB) on the board
 * cpu-unit structure, reading "clock-frequency" and an implementation-
 * specific ecache-size property from the cpu's device tree node (dip).
 *
 * NOTE(review): this view of the file has extraction gaps — the return
 * type, local declarations (clock_freq, ecache_size), several case
 * labels, break statements and closing braces are missing; do not treat
 * the text below as compilable as-is. Confirm against the upstream file.
 */
75 sbd_cpu_set_prop(sbd_cpu_unit_t
*cp
, dev_info_t
*dip
)
79 char *cache_str
= NULL
;
81 /* read in the CPU speed */
82 clock_freq
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
83 DDI_PROP_DONTPASS
, "clock-frequency", 0);
/* a zero "clock-frequency" property is unexpected; flag it in DEBUG */
85 ASSERT(clock_freq
!= 0);
88 * The ecache property string is not the same
89 * for all CPU implementations.
/*
 * Select the property name by CPU implementation.
 * NOTE(review): the case labels for the "l2-cache-size" and
 * "l3-cache-size" assignments are missing from this view — presumably
 * Jaguar and Panther respectively; verify against the original source.
 */
91 switch (cp
->sbc_cpu_impl
) {
93 case CHEETAH_PLUS_IMPL
:
94 cache_str
= "ecache-size";
97 cache_str
= "l2-cache-size";
100 cache_str
= "l3-cache-size";
/* unknown implementation type: warn (default arm of the switch) */
103 cmn_err(CE_WARN
, "cpu implementation type "
104 "is an unknown %d value", cp
->sbc_cpu_impl
);
109 if (cache_str
!= NULL
) {
110 /* read in the ecache size */
111 ecache_size
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
112 DDI_PROP_DONTPASS
, cache_str
, 0);
116 * In the case the size is still 0,
117 * a zero value will be displayed running non-debug.
119 ASSERT(ecache_size
!= 0);
121 /* convert to the proper units */
/* Hz -> MHz, rounded to the nearest MHz */
122 cp
->sbc_speed
= (clock_freq
+ 500000) / 1000000;
/* bytes -> megabytes (truncating) */
123 cp
->sbc_ecache
= ecache_size
/ (1024 * 1024);
/*
 * sbd_fill_cpu_stat:
 * Populate one sbd_cpu_stat_t (*csp) from the cached board cpu-unit
 * state (*cp) and the cpu's device node (dip).
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (e.g. namelen) and braces are missing from this view.
 */
127 sbd_fill_cpu_stat(sbd_cpu_unit_t
*cp
, dev_info_t
*dip
, sbd_cpu_stat_t
*csp
)
/* start from a zeroed stat structure */
131 bzero((caddr_t
)csp
, sizeof (*csp
));
132 csp
->cs_type
= cp
->sbc_cm
.sbdev_type
;
133 csp
->cs_unit
= cp
->sbc_cm
.sbdev_unum
;
/* read the OBP "device_type" property into cs_name */
134 namelen
= sizeof (csp
->cs_name
);
135 (void) ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
136 OBP_DEVICETYPE
, (caddr_t
)csp
->cs_name
, &namelen
);
137 csp
->cs_busy
= cp
->sbc_cm
.sbdev_busy
;
138 csp
->cs_time
= cp
->sbc_cm
.sbdev_time
;
139 csp
->cs_ostate
= cp
->sbc_cm
.sbdev_ostate
;
140 csp
->cs_cpuid
= cp
->sbc_cpu_id
;
144 * If we have marked the cpu's condition previously
145 * then don't rewrite it
/* a previously-recorded UNUSABLE condition is sticky */
147 if (csp
->cs_cond
!= SBD_COND_UNUSABLE
)
148 csp
->cs_cond
= sbd_get_comp_cond(dip
);
151 * If the speed and ecache properties have not been
152 * cached yet, read them in from the device tree.
154 if ((cp
->sbc_speed
== 0) || (cp
->sbc_ecache
== 0))
155 sbd_cpu_set_prop(cp
, dip
);
157 /* use the cached speed and ecache values */
158 csp
->cs_speed
= cp
->sbc_speed
;
159 csp
->cs_ecache
= cp
->sbc_ecache
;
/*
 * sbd_fill_cmp_stat:
 * Collapse the per-core sbd_cpu_stat_t array (csp[0..ncores-1]) into a
 * single CMP (chip multiprocessor) stat structure (psp), seeding the
 * common fields from core 0 and then merging data from the other cores.
 *
 * NOTE(review): extraction gaps — the return type, the psp parameter
 * declaration, local declarations (core) and braces are missing from
 * this view.
 */
163 sbd_fill_cmp_stat(sbd_cpu_stat_t
*csp
, int ncores
, int impl
,
168 ASSERT(csp
&& psp
&& (ncores
>= 1));
170 bzero((caddr_t
)psp
, sizeof (*psp
));
173 * Fill in the common status information based
174 * on the data for the first core.
176 psp
->ps_type
= SBD_COMP_CMP
;
177 psp
->ps_unit
= SBD_CMP_NUM(csp
->cs_unit
);
178 (void) strncpy(psp
->ps_name
, csp
->cs_name
, sizeof (psp
->ps_name
));
179 psp
->ps_cond
= csp
->cs_cond
;
180 psp
->ps_busy
= csp
->cs_busy
;
181 psp
->ps_time
= csp
->cs_time
;
182 psp
->ps_ostate
= csp
->cs_ostate
;
183 psp
->ps_suspend
= csp
->cs_suspend
;
185 /* CMP specific status data */
/* first entry of the per-core cpuid array gets core 0's cpuid */
186 *psp
->ps_cpuid
= csp
->cs_cpuid
;
188 psp
->ps_speed
= csp
->cs_speed
;
189 psp
->ps_ecache
= csp
->cs_ecache
;
192 * Walk through the data for the remaining cores.
193 * Make any adjustments to the common status data,
194 * or the shared CMP specific data if necessary.
196 for (core
= 1; core
< ncores
; core
++) {
199 * The following properties should be the same
200 * for all the cores of the CMP.
202 ASSERT(psp
->ps_unit
== SBD_CMP_NUM(csp
[core
].cs_unit
));
203 ASSERT(psp
->ps_speed
== csp
[core
].cs_speed
);
/* record this core's cpuid in the CMP's cpuid array */
205 psp
->ps_cpuid
[core
] = csp
[core
].cs_cpuid
;
209 * Jaguar has a split ecache, so the ecache
210 * for each core must be added together to
211 * get the total ecache for the whole chip.
213 if (IS_JAGUAR(impl
)) {
214 psp
->ps_ecache
+= csp
[core
].cs_ecache
;
217 /* adjust time if necessary */
/* the CMP reports the most recent per-core timestamp */
218 if (csp
[core
].cs_time
> psp
->ps_time
) {
219 psp
->ps_time
= csp
[core
].cs_time
;
/* CMP is busy if any core is busy */
222 psp
->ps_busy
|= csp
[core
].cs_busy
;
225 * If any of the cores are configured, the
226 * entire CMP is marked as configured.
228 if (csp
[core
].cs_ostate
== SBD_STAT_CONFIGURED
) {
229 psp
->ps_ostate
= csp
[core
].cs_ostate
;
/*
 * sbd_cpu_flags:
 * Gather status for the CPU/CMP devices in 'devset' on the board
 * referenced by the handle, writing one stat record per CMP into dsp.
 * Every CPU is treated as a CMP; a true CMP has its per-core stats
 * merged via sbd_fill_cmp_stat(), otherwise the single core's stat is
 * copied out directly.
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (sbp, hdp, cmp, ncpu, ncores, core, unit, dip, cp, psp), several
 * continue/skip branches and braces are missing from this view.
 */
235 sbd_cpu_flags(sbd_handle_t
*hp
, sbd_devset_t devset
, sbd_dev_stat_t
*dsp
)
241 sbd_cpu_stat_t cstat
[MAX_CORES_PER_CMP
];
243 sbp
= SBDH2BD(hp
->h_sbd
);
244 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
247 * Grab the status lock before accessing the dip as we allow
248 * concurrent status and branch unconfigure and disconnect.
250 * The disconnect thread clears the present devset first
251 * and then destroys dips. It is possible that the status
252 * thread checks the present devset before they are cleared
253 * but accesses the dip after they are destroyed causing a
254 * panic. To prevent this, the status thread should check
255 * the present devset and access dips with status lock held.
256 * Similarly disconnect thread should clear the present devset
257 * and destroy dips with status lock held.
259 mutex_enter(&sbp
->sb_slock
);
262 * Only look for requested devices that are actually present.
264 devset
&= SBD_DEVS_PRESENT(sbp
);
267 * Treat every CPU as a CMP. In the case where the
268 * device is not a CMP, treat it as a CMP with only
271 for (cmp
= ncpu
= 0; cmp
< MAX_CMP_UNITS_PER_BOARD
; cmp
++) {
/* skip CMP units not requested in the devset */
279 if (DEVSET_IN_SET(devset
, SBD_COMP_CMP
, cmp
) == 0)
/* collect a stat record for each present, initialized core */
284 for (core
= 0; core
< MAX_CORES_PER_CMP
; core
++) {
287 unit
= sbdp_portid_to_cpu_unit(cmp
, core
);
290 * Check to make sure the cpu is in a state
291 * where its fully initialized.
293 if (SBD_DEVICE_STATE(sbp
, SBD_COMP_CPU
, unit
) ==
297 dip
= sbp
->sb_devlist
[NIX(SBD_COMP_CMP
)][unit
];
301 cp
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
303 sbd_fill_cpu_stat(cp
, dip
, &cstat
[ncores
++]);
310 * Store the data to the outgoing array. If the
311 * device is a CMP, combine all the data for the
312 * cores into a single stat structure.
314 * The check for a CMP device uses the last core
315 * found, assuming that all cores will have the
316 * same implementation.
318 if (CPU_IMPL_IS_CMP(cp
->sbc_cpu_impl
)) {
319 psp
= (sbd_cmp_stat_t
*)dsp
;
320 sbd_fill_cmp_stat(cstat
, ncores
, cp
->sbc_cpu_impl
, psp
);
/* non-CMP: copy the single core's stat record straight out */
323 bcopy(cstat
, dsp
, sizeof (sbd_cpu_stat_t
));
330 mutex_exit(&sbp
->sb_slock
);
332 sbd_release_sbdp_handle(hdp
);
/*
 * sbd_pre_release_cpu:
 * Pre-release phase for the CPUs in devlist: snapshot each cpu's flags,
 * offline active cpus (CPU_FORCED when the ioctl FORCE flag is set),
 * and let the platform release each component. On failure, walk the
 * list backwards and cancel (sbd_cancel_cpu) the cpus already released.
 * Runs under cpu_lock.
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (hdp, i, dip, cpuid, unit, cp, cpup, rv), error-branch cmn_err
 * prefixes, continue/goto statements and braces are missing from this
 * view.
 */
338 sbd_pre_release_cpu(sbd_handle_t
*hp
, sbd_devlist_t
*devlist
, int devnum
)
344 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
345 sbderror_t
*ep
= SBD_HD2ERR(hp
);
347 static fn_t f
= "sbd_pre_release_cpu";
350 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
352 * May have to juggle bootproc in release_component
354 mutex_enter(&cpu_lock
);
356 for (i
= 0; i
< devnum
; i
++, devlist
++) {
357 dip
= devlist
->dv_dip
;
359 cpuid
= sbdp_get_cpuid(hdp
, dip
);
/* cpuid lookup failed: FORCE continues, otherwise propagate the error */
361 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
363 "sbd:%s: failed to get cpuid for "
364 "dip (0x%p)", f
, (void *)dip
);
367 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
373 unit
= sbdp_get_unit_num(hdp
, dip
);
/* unit lookup failed: same FORCE-vs-error handling as above */
375 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
377 "sbd:%s: failed to get unit (cpu %d)",
381 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
386 cp
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
/* snapshot current cpu_flags so sbd_cancel_cpu can restore the state */
387 cp
->sbc_cpu_flags
= cpu
[cpuid
]->cpu_flags
;
389 if (cpu_flagged_active(cp
->sbc_cpu_flags
)) {
390 int cpu_offline_flags
= 0;
392 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
)
393 cpu_offline_flags
= CPU_FORCED
;
394 PR_CPU("%s: offlining cpuid %d unit %d", f
,
/* offline failure: record ESBD_OFFLINE and warn about bound threads */
396 if (cpu_offline(cpu
[cpuid
], cpu_offline_flags
)) {
398 "%s: failed to offline cpu %d",
401 SBD_SET_ERR(ep
, ESBD_OFFLINE
);
402 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
403 cpup
= cpu_get(cpuid
);
404 if (cpup
&& disp_bound_threads(cpup
, 0)) {
405 cmn_err(CE_WARN
, "sbd:%s: thread(s) "
/* platform-specific release (may juggle bootproc, per note above) */
414 if (sbdp_release_component(hdp
, dip
)) {
415 SBD_GET_PERR(hdp
->h_err
, ep
);
424 mutex_exit(&cpu_lock
);
428 * Need to unwind others since at this level (pre-release)
429 * the device state has not yet transitioned and failures
430 * will prevent us from reaching the "post" release
431 * function where states are normally transitioned.
433 for (; i
>= 0; i
--, devlist
--) {
434 dip
= devlist
->dv_dip
;
435 unit
= sbdp_get_unit_num(hdp
, dip
);
438 "sbd:%s: failed to get unit for "
439 "dip (0x%p)", f
, (void *)dip
);
442 (void) sbd_cancel_cpu(hp
, unit
);
446 SBD_INJECT_ERR(SBD_OFFLINE_CPU_PSEUDO_ERR
,
449 sbp
->sb_cpupath
[devnum
- 1]);
451 sbd_release_sbdp_handle(hdp
);
/*
 * sbd_pre_attach_cpu:
 * Pre-attach phase for the CPUs in devlist: validate each dip, look up
 * cpuid and unit, and for cpus coming from the UNCONFIGURED state unmap
 * the signature block (CPU_SGN_MAPOUT) before attachment. Acquires
 * cpu_lock on the way out (released later in the post-attach phase).
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (hdp, i, dip, cpuid, unit, dstate), error-branch bodies and braces
 * are missing from this view.
 */
457 sbd_pre_attach_cpu(sbd_handle_t
*hp
, sbd_devlist_t
*devlist
, int devnum
)
462 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
465 static fn_t f
= "sbd_pre_attach_cpu";
468 PR_CPU("%s...\n", f
);
470 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
472 for (i
= 0; i
< devnum
; i
++, devlist
++) {
473 dip
= devlist
->dv_dip
;
/* either a CMP child or a dip whose branch we hold must be true */
475 ASSERT(sbd_is_cmp_child(dip
) || e_ddi_branch_held(dip
));
477 cpuid
= sbdp_get_cpuid(hdp
, dip
);
/* cpuid lookup failed: FORCE continues, otherwise propagate the error */
479 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
481 "sbd:%s: failed to get cpuid for "
482 "dip (0x%p)", f
, (void *)dip
);
485 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
490 unit
= sbdp_get_unit_num(hdp
, dip
);
492 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
494 "sbd:%s: failed to get unit (cpu %d)",
498 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
503 PR_CPU("%s: attach cpu-unit (%d.%d)\n",
504 f
, sbp
->sb_num
, unit
);
506 dstate
= SBD_DEVICE_STATE(sbp
, SBD_COMP_CPU
, unit
);
508 if (dstate
== SBD_STATE_UNCONFIGURED
) {
510 * If we're coming from the UNCONFIGURED
511 * state then the cpu's sigblock will
512 * still be mapped in. Need to unmap it
513 * before continuing with attachment.
515 PR_CPU("%s: unmapping sigblk for cpu %d\n",
518 /* platform specific release of sigblk */
519 CPU_SGN_MAPOUT(cpuid
);
/* cpu_lock taken here; the matching exit is in sbd_post_attach_cpu */
524 mutex_enter(&cpu_lock
);
526 sbd_release_sbdp_handle(hdp
);
/*
 * sbd_post_attach_cpu:
 * Post-attach phase: power on and online each newly-attached cpu, and
 * on success mark its board cpu-unit condition SBD_COND_OK. Releases
 * the cpu_lock acquired in sbd_pre_attach_cpu.
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (hdp, i, dip, cpuid, unit, cp), the cp = cpu_get(cpuid) lookup whose
 * failure branch appears at original line 567, several error-branch
 * bodies, continue statements and braces are missing from this view.
 */
532 sbd_post_attach_cpu(sbd_handle_t
*hp
, sbd_devlist_t
*devlist
, int devnum
)
535 sbderror_t
*ep
= SBD_HD2ERR(hp
);
536 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
540 int err
= ESBD_NOERROR
;
542 static fn_t f
= "sbd_post_attach_cpu";
543 sbd_cpu_unit_t
*cpup
;
546 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
548 /* Startup and online newly-attached CPUs */
549 for (i
= 0; i
< devnum
; i
++, devlist
++) {
550 dip
= devlist
->dv_dip
;
551 cpuid
= sbdp_get_cpuid(hdp
, dip
);
/* cpuid lookup failed: FORCE continues, otherwise propagate the error */
553 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
555 "sbd:%s: failed to get cpuid for "
556 "dip (0x%p)", f
, (void *)dip
);
559 SBD_GET_PERR(hdp
->h_err
, ep
);
/* cpu_get failure branch (the lookup itself is in a missing line) */
567 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
569 "sbd:%s: cpu_get failed for cpu %d",
573 SBD_SET_ERR(ep
, ESBD_INTERNAL
);
574 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
/* power on the cpu if it is currently powered off */
579 if (cpu_is_poweredoff(cp
)) {
580 if (cpu_poweron(cp
) != 0) {
581 SBD_SET_ERR(ep
, ESBD_CPUSTART
);
582 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
584 "%s: failed to power-on cpu %d",
588 SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR
,
592 PR_CPU("%s: cpu %d powered ON\n", f
, cpuid
);
/* bring the cpu online if it is currently offline */
595 if (cpu_is_offline(cp
)) {
596 PR_CPU("%s: onlining cpu %d...\n", f
, cpuid
);
598 if (cpu_online(cp
) != 0) {
599 SBD_SET_ERR(ep
, ESBD_ONLINE
);
600 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
602 "%s: failed to online cpu %d",
605 SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR
,
612 * if there is no error mark the cpu as OK to use
614 if (SBD_GET_ERR(ep
) == 0) {
615 unit
= sbdp_get_unit_num(hdp
, dip
);
617 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
619 "sbd:%s: failed to get unit "
620 "(cpu %d)", f
, cpuid
);
623 SBD_GET_PERR(hdp
->h_err
,
628 cpup
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
629 cpup
->sbc_cm
.sbdev_cond
= SBD_COND_OK
;
/* drop the cpu_lock taken in sbd_pre_attach_cpu */
633 mutex_exit(&cpu_lock
);
635 sbd_release_sbdp_handle(hdp
);
637 if (err
!= ESBD_NOERROR
) {
/*
 * sbd_pre_detach_cpu:
 * Pre-detach phase: for each cpu in devlist, verify it has been quiesced
 * (cpus are offlined during Release) and power it off; a cpu that is
 * still active sets ESBD_BUSY. Runs under cpu_lock.
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (hdp, i, dip, cpuid, unit, cpu, e), error-branch bodies, continue
 * statements and braces are missing from this view.
 */
645 sbd_pre_detach_cpu(sbd_handle_t
*hp
, sbd_devlist_t
*devlist
, int devnum
)
652 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
653 sbderror_t
*ep
= SBD_HD2ERR(hp
);
654 static fn_t f
= "sbd_pre_detach_cpu";
658 PR_CPU("%s...\n", f
);
660 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
662 mutex_enter(&cpu_lock
);
664 for (i
= 0; i
< devnum
; i
++, devlist
++) {
665 dip
= devlist
->dv_dip
;
666 cpuid
= sbdp_get_cpuid(hdp
, dip
);
/* cpuid lookup failed: FORCE continues, otherwise propagate the error */
668 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
670 "sbd:%s: failed to get cpuid for "
671 "dip (0x%p)", f
, (void *)dip
);
674 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
679 cpu
= cpu_get(cpuid
);
682 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
684 "sbd:%s: failed to get cpu %d",
688 SBD_SET_ERR(ep
, ESBD_INTERNAL
);
689 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
694 unit
= sbdp_get_unit_num(hdp
, dip
);
696 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
698 "sbd:%s: failed to get unit (cpu %d)",
702 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
707 PR_CPU("%s: OS detach cpu-unit (%d.%d)\n",
708 f
, sbp
->sb_num
, unit
);
711 * CPUs were offlined during Release.
713 if (cpu_is_poweredoff(cpu
)) {
714 PR_CPU("%s: cpu %d already powered OFF\n", f
, cpuid
);
/* offline but still powered: power it off now */
718 if (cpu_is_offline(cpu
)) {
721 if (e
= cpu_poweroff(cpu
)) {
723 "%s: failed to power-off cpu %d "
726 SBD_SET_ERR(ep
, ESBD_CPUSTOP
);
727 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
732 PR_CPU("%s: cpu %d powered OFF\n",
/* neither powered-off nor offline: cpu is still active, refuse detach */
736 cmn_err(CE_WARN
, "%s: cpu %d still active",
738 SBD_SET_ERR(ep
, ESBD_BUSY
);
739 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[i
]);
745 sbd_release_sbdp_handle(hdp
);
/*
 * sbd_post_detach_cpu:
 * Post-detach phase: for each cpu in devlist, if no error was recorded
 * mark the board cpu-unit condition SBD_COND_UNUSABLE. Expects cpu_lock
 * to be held on entry (asserted) and releases it on the way out.
 *
 * NOTE(review): extraction gaps — return type, local declarations
 * (hdp, i, dip, cpuid, unit), error-branch bodies, continue statements
 * and braces are missing from this view. Note hdp is re-acquired each
 * loop iteration and released at the bottom of the loop.
 */
751 sbd_post_detach_cpu(sbd_handle_t
*hp
, sbd_devlist_t
*devlist
, int devnum
)
753 static fn_t f
= "sbd_post_detach_cpu";
755 sbderror_t
*ep
= SBD_HD2ERR(hp
);
756 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
760 sbd_cpu_unit_t
*cpup
;
763 PR_CPU("%s...\n", f
);
766 * We should be holding the cpu_lock at this point,
767 * and should have blocked device tree changes.
769 ASSERT(MUTEX_HELD(&cpu_lock
));
771 for (i
= 0; i
< devnum
; i
++, devlist
++) {
772 dip
= devlist
->dv_dip
;
773 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
774 cpuid
= sbdp_get_cpuid(hdp
, dip
);
/* cpuid lookup failed: FORCE continues, otherwise propagate the error */
776 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
778 "sbd:%s: failed to get cpuid for "
779 "dip (0x%p)", f
, (void *)dip
);
782 SBD_GET_PERR(hdp
->h_err
, ep
);
787 * if there is no error mark the cpu as unusable
789 if (SBD_GET_ERR(ep
) == 0) {
790 unit
= sbdp_get_unit_num(hdp
, dip
);
792 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
) {
794 "sbd:%s: failed to get unit "
795 "(cpu %d)", f
, cpuid
);
798 SBD_GET_PERR(hdp
->h_err
,
803 cpup
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
804 cpup
->sbc_cm
.sbdev_cond
= SBD_COND_UNUSABLE
;
806 sbd_release_sbdp_handle(hdp
);
809 mutex_exit(&cpu_lock
);
816 * Cancel previous release operation for cpu. For cpus this means simply
817 * bringing cpus that were offline back online. Note that they had to have been
818 * online at the time they were released. If attempting to power on or online
819 * a CPU fails, SBD_CPUERR_FATAL is returned to indicate that the CPU appears to
820 * be unsalvageable. If a CPU reaches an online or nointr state but can't be
821 * taken to a "lesser" state, SBD_CPUERR_RECOVERABLE is returned to indicate
822 * that it was not returned to its original state but appears to be functional.
823 * Note that the latter case can occur due to unexpected but non-erroneous CPU
824 * manipulation (e.g. by the "psradm" command) during the DR operation.
/*
 * NOTE(review): extraction gaps — the return type, local declarations
 * (cp, cpup), early-return statements, else branches and braces are
 * missing from this view. The cpu-state cascade below restores the cpu
 * toward the pre-release flags snapshot in cp->sbc_cpu_flags.
 */
827 sbd_cancel_cpu(sbd_handle_t
*hp
, int unit
)
829 int rv
= SBD_CPUERR_NONE
;
830 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
831 sbderror_t
*ep
= SBD_HD2ERR(hp
);
833 static fn_t f
= "sbd_cancel_cpu";
835 int cpu_offline_flags
= 0;
837 PR_ALL("%s...\n", f
);
839 cp
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
842 * If CPU should remain off, nothing needs to be done.
844 if (cpu_flagged_poweredoff(cp
->sbc_cpu_flags
))
847 if (hp
->h_flags
& SBD_IOCTL_FLAG_FORCE
)
848 cpu_offline_flags
= CPU_FORCED
;
851 * CPU had been either offline, online, or set to no-intr. We
852 * will return a component to its original state that it was
853 * prior to the failed DR operation. There is a possible race
854 * condition between the calls to this function and re-obtaining
855 * the cpu_lock where a cpu state could change. Because of this
856 * we can't externally document that we are trying to roll cpus
857 * back to their original state, but we believe a best effort
861 mutex_enter(&cpu_lock
);
862 cpup
= cpu
[cp
->sbc_cpu_id
];
865 * The following will compare the cpu's current state with a
866 * snapshot of its state taken before the failed DR operation
/* step 1: powered off now -> power on (failure is fatal) */
870 if (cpu_is_poweredoff(cpup
)) {
871 if (cpu_poweron(cpup
)) {
873 "sbd:%s: failed to power-on cpu %d",
875 SBD_SET_ERR(ep
, ESBD_CPUSTART
);
876 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[unit
]);
877 rv
= SBD_CPUERR_FATAL
;
880 SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR
,
883 sbp
->sb_cpupath
[unit
]);
/* step 2: offline now -> leave offline if snapshot says so, else online */
887 if (cpu_is_offline(cpup
)) {
888 if (cpu_flagged_offline(cp
->sbc_cpu_flags
)) {
889 PR_CPU("%s: leaving cpu %d OFFLINE\n",
891 } else if (cpu_online(cpup
)) {
893 "sbd:%s: failed to online cpu %d",
895 SBD_SET_ERR(ep
, ESBD_ONLINE
);
896 SBD_SET_ERRSTR(ep
, sbp
->sb_cpupath
[unit
]);
897 rv
= SBD_CPUERR_FATAL
;
900 SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR
,
903 sbp
->sb_cpupath
[unit
]);
/* step 3: online now -> move to offline or no-intr per the snapshot */
908 if (cpu_is_online(cpup
)) {
909 if (cpu_flagged_online(cp
->sbc_cpu_flags
)) {
910 PR_CPU("%s: setting cpu %d ONLINE\n",
912 } else if (cpu_flagged_offline(cp
->sbc_cpu_flags
)) {
913 if (cpu_offline(cpup
, cpu_offline_flags
)) {
915 "sbd:%s: failed to offline"
916 " cpu %d", f
, cp
->sbc_cpu_id
);
917 rv
= SBD_CPUERR_RECOVERABLE
;
920 } else if (cpu_flagged_nointr(cp
->sbc_cpu_flags
)) {
921 if (cpu_intr_disable(cpup
)) {
922 cmn_err(CE_WARN
, "%s: failed to "
923 "disable interrupts on cpu %d",
925 rv
= SBD_CPUERR_RECOVERABLE
;
927 PR_CPU("%s: setting cpu %d to NOINTR"
/* step 4: no-intr now -> re-enable interrupts / offline per snapshot */
936 if (cpu_is_nointr(cpup
)) {
937 if (cpu_flagged_online(cp
->sbc_cpu_flags
)) {
938 cpu_intr_enable(cpup
);
939 PR_CPU("%s: setting cpu %d ONLINE"
943 if (cpu_flagged_offline(cp
->sbc_cpu_flags
)) {
944 if (cpu_offline(cpup
, cpu_offline_flags
)) {
946 "sbd:%s: failed to offline"
947 " cpu %d", f
, cp
->sbc_cpu_id
);
948 rv
= SBD_CPUERR_RECOVERABLE
;
953 mutex_exit(&cpu_lock
);
/*
 * sbd_connect_cpu:
 * Connect (wake up) the cpu at 'unit' on board 'sbp': find its dip,
 * skip if already attached or already known to Solaris (cpu_get), then
 * call the platform sbdp_connect_cpu(). On connect failure the board's
 * memaccess flag is cleared.
 *
 * NOTE(review): extraction gaps — the return type, local declarations
 * (dip, hdp, cpuid, cpu, rv), the else/early-return branches and braces
 * are missing from this view.
 */
959 sbd_connect_cpu(sbd_board_t
*sbp
, int unit
)
966 extern kmutex_t cpu_lock
;
967 static fn_t f
= "sbd_connect_cpu";
968 sbd_handle_t
*hp
= MACHBD2HD(sbp
);
971 * get dip for cpu just located in tree walk
973 if (SBD_DEV_IS_PRESENT(sbp
, SBD_COMP_CPU
, unit
)) {
974 dip
= sbp
->sb_devlist
[NIX(SBD_COMP_CPU
)][unit
];
/* dip missing or bad: warn (error branch, body partly missing here) */
977 "sbd:%s: bad dip for cpu unit %d board %d",
978 f
, unit
, sbp
->sb_num
);
981 PR_CPU("%s...\n", f
);
987 * if sbd has attached this cpu, no need to bring
990 if (SBD_DEV_IS_ATTACHED(sbp
, SBD_COMP_CPU
, unit
)) {
994 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
996 cpuid
= sbdp_get_cpuid(hdp
, dip
);
998 sbd_release_sbdp_handle(hdp
);
1003 * if the cpu is already under Solaris control,
/* cpu_get() must be called with cpu_lock held */
1006 mutex_enter(&cpu_lock
);
1007 cpu
= cpu_get(cpuid
);
1008 mutex_exit(&cpu_lock
);
1010 sbd_release_sbdp_handle(hdp
);
1014 rv
= sbdp_connect_cpu(hdp
, dip
, cpuid
);
/* connect failed: memory access via this board no longer trusted */
1017 sbp
->sb_memaccess_ok
= 0;
1019 "sbd:%s: failed to wake up cpu unit %d board %d",
1020 f
, unit
, sbp
->sb_num
);
1021 sbd_release_sbdp_handle(hdp
);
1024 sbd_release_sbdp_handle(hdp
);
1030 sbd_disconnect_cpu(sbd_handle_t
*hp
, int unit
)
1032 sbd_board_t
*sbp
= SBDH2BD(hp
->h_sbd
);
1037 processorid_t cpuid
;
1038 static fn_t f
= "sbd_disconnect_cpu";
1040 PR_CPU("%s...\n", f
);
1042 ASSERT((SBD_DEVICE_STATE(sbp
, SBD_COMP_CPU
, unit
) ==
1043 SBD_STATE_CONNECTED
) ||
1044 (SBD_DEVICE_STATE(sbp
, SBD_COMP_CPU
, unit
) ==
1045 SBD_STATE_UNCONFIGURED
));
1047 cp
= SBD_GET_BOARD_CPUUNIT(sbp
, unit
);
1049 cpuid
= cp
->sbc_cpu_id
;
1051 dip
= sbp
->sb_devlist
[NIX(SBD_COMP_CPU
)][unit
];
1053 hdp
= sbd_get_sbdp_handle(sbp
, hp
);
1055 rv
= sbdp_disconnect_cpu(hdp
, dip
, cpuid
);
1058 SBD_GET_PERR(hdp
->h_err
, SBD_HD2ERR(hp
));
1060 sbd_release_sbdp_handle(hdp
);