/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * Public interface to routines implemented by CPU modules
 */
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/x86_archext.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpu_module_ms.h>
#include <sys/fm/util.h>
#include <sys/reboot.h>
#include <sys/modctl.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/fm/protocol.h>
#include <sys/pcb.h>
#include <sys/ontrap.h>
#include <sys/psw.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
/*
 * Set to force cmi_init to fail.
 */
int cmi_no_init = 0;

/*
 * Set to avoid MCA initialization.
 */
int cmi_no_mca_init = 0;

/*
 * If cleared for debugging we will not attempt to load a model-specific
 * cpu module but will load the generic cpu module instead.
 */
int cmi_force_generic = 0;

/*
 * If cleared for debugging, we will suppress panicking on fatal hardware
 * errors.  This should *only* be used for debugging; its use can and will
 * cause data corruption if actual hardware errors are detected by the system.
 */
int cmi_panic_on_uncorrectable_error = 1;

/*
 * Set to indicate whether we are able to enable cmci interrupt.
 */
int cmi_enable_cmci = 0;
/*
 * Subdirectory (relative to the module search path) in which we will
 * look for cpu modules.
 */
#define CPUMOD_SUBDIR   "cpu"

/*
 * CPU modules have filenames such as "cpu.AuthenticAMD.15" and
 * "cpu.generic" - the "cpu" prefix is specified by the following.
 */
#define CPUMOD_PREFIX   "cpu"
91 * Structure used to keep track of cpu modules we have loaded and their ops
96 const cmi_ops_t
*cmi_ops
;
97 struct modctl
*cmi_modp
;
101 static cmi_t
*cmi_list
;
102 static const cmi_mc_ops_t
*cmi_mc_global_ops
;
103 static void *cmi_mc_global_data
;
104 static kmutex_t cmi_load_lock
;
107 * Functions we need from cmi_hw.c that are not part of the cpu_module.h
110 extern cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class
, uint_t
, uint_t
, uint_t
);
111 extern void cmi_hdl_destroy(cmi_hdl_t ophdl
);
112 extern void cmi_hdl_setcmi(cmi_hdl_t
, void *, void *);
113 extern void *cmi_hdl_getcmi(cmi_hdl_t
);
114 extern void cmi_hdl_setmc(cmi_hdl_t
, const struct cmi_mc_ops
*, void *);
115 extern void cmi_hdl_inj_begin(cmi_hdl_t
);
116 extern void cmi_hdl_inj_end(cmi_hdl_t
);
117 extern void cmi_read_smbios(cmi_hdl_t
);
/* Map a handle to the cmi_t it was initialized with (may be NULL). */
#define HDL2CMI(hdl)            cmi_hdl_getcmi(hdl)

#define CMI_OPS(cmi)            (cmi)->cmi_ops
#define CMI_OP_PRESENT(cmi, op) ((cmi) && CMI_OPS(cmi)->op != NULL)

/* Module-match specificity levels, least to most specific. */
#define CMI_MATCH_VENDOR        0       /* Just match on vendor */
#define CMI_MATCH_FAMILY        1       /* Match down to family */
#define CMI_MATCH_MODEL         2       /* Match down to model */
#define CMI_MATCH_STEPPING      3       /* Match down to stepping */
132 ASSERT(MUTEX_HELD(&cmi_load_lock
));
134 cmi
->cmi_prev
= NULL
;
135 cmi
->cmi_next
= cmi_list
;
136 if (cmi_list
!= NULL
)
137 cmi_list
->cmi_prev
= cmi
;
142 cmi_unlink(cmi_t
*cmi
)
144 ASSERT(MUTEX_HELD(&cmi_load_lock
));
145 ASSERT(cmi
->cmi_refcnt
== 0);
147 if (cmi
->cmi_prev
!= NULL
)
148 cmi
->cmi_prev
= cmi
->cmi_next
;
150 if (cmi
->cmi_next
!= NULL
)
151 cmi
->cmi_next
->cmi_prev
= cmi
->cmi_prev
;
154 cmi_list
= cmi
->cmi_next
;
158 * Hold the module in memory. We call to CPU modules without using the
159 * stubs mechanism, so these modules must be manually held in memory.
160 * The mod_ref acts as if another loaded module has a dependency on us.
165 ASSERT(MUTEX_HELD(&cmi_load_lock
));
167 mutex_enter(&mod_lock
);
168 cmi
->cmi_modp
->mod_ref
++;
169 mutex_exit(&mod_lock
);
176 ASSERT(MUTEX_HELD(&cmi_load_lock
));
178 mutex_enter(&mod_lock
);
179 cmi
->cmi_modp
->mod_ref
--;
180 mutex_exit(&mod_lock
);
182 if (--cmi
->cmi_refcnt
== 0) {
184 kmem_free(cmi
, sizeof (cmi_t
));
189 cmi_getops(modctl_t
*modp
)
193 if ((ops
= (cmi_ops_t
*)modlookup_by_modctl(modp
, "_cmi_ops")) ==
195 cmn_err(CE_WARN
, "cpu module '%s' is invalid: no _cmi_ops "
196 "found", modp
->mod_modname
);
200 if (ops
->cmi_init
== NULL
) {
201 cmn_err(CE_WARN
, "cpu module '%s' is invalid: no cmi_init "
202 "entry point", modp
->mod_modname
);
210 cmi_load_modctl(modctl_t
*modp
)
215 cmi_api_ver_t apiver
;
217 ASSERT(MUTEX_HELD(&cmi_load_lock
));
219 for (cmi
= cmi_list
; cmi
!= NULL
; cmi
= cmi
->cmi_next
) {
220 if (cmi
->cmi_modp
== modp
)
224 if ((ver
= modlookup_by_modctl(modp
, "_cmi_api_version"))
225 == (uintptr_t)NULL
) {
227 * Apparently a cpu module before versioning was introduced -
228 * we call this version 0.
230 apiver
= CMI_API_VERSION_0
;
232 apiver
= *((cmi_api_ver_t
*)ver
);
233 if (!CMI_API_VERSION_CHKMAGIC(apiver
)) {
234 cmn_err(CE_WARN
, "cpu module '%s' is invalid: "
235 "_cmi_api_version 0x%x has bad magic",
236 modp
->mod_modname
, apiver
);
241 if (apiver
!= CMI_API_VERSION
) {
242 cmn_err(CE_WARN
, "cpu module '%s' has API version %d, "
243 "kernel requires API version %d", modp
->mod_modname
,
244 CMI_API_VERSION_TOPRINT(apiver
),
245 CMI_API_VERSION_TOPRINT(CMI_API_VERSION
));
249 if ((ops
= cmi_getops(modp
)) == NULL
)
252 cmi
= kmem_zalloc(sizeof (*cmi
), KM_SLEEP
);
254 cmi
->cmi_modp
= modp
;
262 cmi_cpu_match(cmi_hdl_t hdl1
, cmi_hdl_t hdl2
, int match
)
264 if (match
>= CMI_MATCH_VENDOR
&&
265 cmi_hdl_vendor(hdl1
) != cmi_hdl_vendor(hdl2
))
268 if (match
>= CMI_MATCH_FAMILY
&&
269 cmi_hdl_family(hdl1
) != cmi_hdl_family(hdl2
))
272 if (match
>= CMI_MATCH_MODEL
&&
273 cmi_hdl_model(hdl1
) != cmi_hdl_model(hdl2
))
276 if (match
>= CMI_MATCH_STEPPING
&&
277 cmi_hdl_stepping(hdl1
) != cmi_hdl_stepping(hdl2
))
284 cmi_search_list_cb(cmi_hdl_t whdl
, void *arg1
, void *arg2
, void *arg3
)
286 cmi_hdl_t thdl
= (cmi_hdl_t
)arg1
;
287 int match
= *((int *)arg2
);
288 cmi_hdl_t
*rsltp
= (cmi_hdl_t
*)arg3
;
290 if (cmi_cpu_match(thdl
, whdl
, match
)) {
291 cmi_hdl_hold(whdl
); /* short-term hold */
293 return (CMI_HDL_WALK_DONE
);
295 return (CMI_HDL_WALK_NEXT
);
300 cmi_search_list(cmi_hdl_t hdl
, int match
)
302 cmi_hdl_t dhdl
= NULL
;
305 ASSERT(MUTEX_HELD(&cmi_load_lock
));
307 cmi_hdl_walk(cmi_search_list_cb
, (void *)hdl
, (void *)&match
, &dhdl
);
310 cmi_hdl_rele(dhdl
); /* held in cmi_search_list_cb */
317 cmi_load_module(cmi_hdl_t hdl
, int match
, int *chosenp
)
324 ASSERT(MUTEX_HELD(&cmi_load_lock
));
325 ASSERT(match
== CMI_MATCH_STEPPING
|| match
== CMI_MATCH_MODEL
||
326 match
== CMI_MATCH_FAMILY
|| match
== CMI_MATCH_VENDOR
);
329 * Have we already loaded a module for a cpu with the same
330 * vendor/family/model/stepping?
332 if ((cmi
= cmi_search_list(hdl
, match
)) != NULL
) {
337 s
[0] = cmi_hdl_family(hdl
);
338 s
[1] = cmi_hdl_model(hdl
);
339 s
[2] = cmi_hdl_stepping(hdl
);
340 modid
= modload_qualified(CPUMOD_SUBDIR
, CPUMOD_PREFIX
,
341 cmi_hdl_vendorstr(hdl
), ".", s
, match
, chosenp
);
346 modp
= mod_hold_by_id(modid
);
347 cmi
= cmi_load_modctl(modp
);
350 mod_release_mod(modp
);
356 * Try to load a cpu module with specific support for this chip type.
359 cmi_load_specific(cmi_hdl_t hdl
, void **datap
)
365 ASSERT(MUTEX_HELD(&cmi_load_lock
));
367 for (i
= CMI_MATCH_STEPPING
; i
>= CMI_MATCH_VENDOR
; i
--) {
370 if ((cmi
= cmi_load_module(hdl
, i
, &suffixlevel
)) == NULL
)
374 * A module has loaded and has a _cmi_ops structure, and the
375 * module has been held for this instance. Call its cmi_init
376 * entry point - we expect success (0) or ENOTSUP.
378 if ((err
= cmi
->cmi_ops
->cmi_init(hdl
, datap
)) == 0) {
379 if (boothowto
& RB_VERBOSE
) {
380 printf("initialized cpu module '%s' on "
381 "chip %d core %d strand %d\n",
382 cmi
->cmi_modp
->mod_modname
,
383 cmi_hdl_chipid(hdl
), cmi_hdl_coreid(hdl
),
384 cmi_hdl_strandid(hdl
));
387 } else if (err
!= ENOTSUP
) {
388 cmn_err(CE_WARN
, "failed to init cpu module '%s' on "
389 "chip %d core %d strand %d: err=%d\n",
390 cmi
->cmi_modp
->mod_modname
,
391 cmi_hdl_chipid(hdl
), cmi_hdl_coreid(hdl
),
392 cmi_hdl_strandid(hdl
), err
);
396 * The module failed or declined to init, so release
397 * it and update i to be equal to the number
398 * of suffices actually used in the last module path.
408 * Load the generic IA32 MCA cpu module, which may still supplement
409 * itself with model-specific support through cpu model-specific modules.
412 cmi_load_generic(cmi_hdl_t hdl
, void **datap
)
419 ASSERT(MUTEX_HELD(&cmi_load_lock
));
421 if ((modid
= modload(CPUMOD_SUBDIR
, CPUMOD_PREFIX
".generic")) == -1)
424 modp
= mod_hold_by_id(modid
);
425 cmi
= cmi_load_modctl(modp
);
428 mod_release_mod(modp
);
433 if ((err
= cmi
->cmi_ops
->cmi_init(hdl
, datap
)) != 0) {
435 cmn_err(CE_WARN
, CPUMOD_PREFIX
".generic failed to "
436 "init: err=%d", err
);
445 cmi_init(enum cmi_hdl_class
class, uint_t chipid
, uint_t coreid
,
457 mutex_enter(&cmi_load_lock
);
459 if ((hdl
= cmi_hdl_create(class, chipid
, coreid
, strandid
)) == NULL
) {
460 mutex_exit(&cmi_load_lock
);
461 cmn_err(CE_WARN
, "There will be no MCA support on chip %d "
462 "core %d strand %d (cmi_hdl_create returned NULL)\n",
463 chipid
, coreid
, strandid
);
467 if (!cmi_force_generic
)
468 cmi
= cmi_load_specific(hdl
, &data
);
470 if (cmi
== NULL
&& (cmi
= cmi_load_generic(hdl
, &data
)) == NULL
) {
471 cmn_err(CE_WARN
, "There will be no MCA support on chip %d "
472 "core %d strand %d\n", chipid
, coreid
, strandid
);
474 mutex_exit(&cmi_load_lock
);
478 cmi_hdl_setcmi(hdl
, cmi
, data
);
482 cmi_read_smbios(hdl
);
484 mutex_exit(&cmi_load_lock
);
490 * cmi_fini is called on DR deconfigure of a cpu resource.
491 * It should not be called at simple offline of a cpu.
494 cmi_fini(cmi_hdl_t hdl
)
496 cmi_t
*cmi
= HDL2CMI(hdl
);
498 if (cms_present(hdl
))
501 if (CMI_OP_PRESENT(cmi
, cmi_fini
))
502 CMI_OPS(cmi
)->cmi_fini(hdl
);
504 cmi_hdl_destroy(hdl
);
508 * cmi_post_startup is called from post_startup for the boot cpu only (no
509 * other cpus are started yet).
512 cmi_post_startup(void)
517 if (cmi_no_mca_init
!= 0 ||
518 (hdl
= cmi_hdl_any()) == NULL
) /* short-term hold */
523 if (CMI_OP_PRESENT(cmi
, cmi_post_startup
))
524 CMI_OPS(cmi
)->cmi_post_startup(hdl
);
530 * Called just once from start_other_cpus when all processors are started.
531 * This will not be called for each cpu, so the registered op must not
532 * assume it is called as such. We are not necessarily executing on
536 cmi_post_mpstartup(void)
541 if (cmi_no_mca_init
!= 0 ||
542 (hdl
= cmi_hdl_any()) == NULL
) /* short-term hold */
547 if (CMI_OP_PRESENT(cmi
, cmi_post_mpstartup
))
548 CMI_OPS(cmi
)->cmi_post_mpstartup(hdl
);
554 cmi_faulted_enter(cmi_hdl_t hdl
)
556 cmi_t
*cmi
= HDL2CMI(hdl
);
558 if (cmi_no_mca_init
!= 0)
561 if (CMI_OP_PRESENT(cmi
, cmi_faulted_enter
))
562 CMI_OPS(cmi
)->cmi_faulted_enter(hdl
);
566 cmi_faulted_exit(cmi_hdl_t hdl
)
568 cmi_t
*cmi
= HDL2CMI(hdl
);
570 if (cmi_no_mca_init
!= 0)
573 if (CMI_OP_PRESENT(cmi
, cmi_faulted_exit
))
574 CMI_OPS(cmi
)->cmi_faulted_exit(hdl
);
578 cmi_mca_init(cmi_hdl_t hdl
)
582 if (cmi_no_mca_init
!= 0)
587 if (CMI_OP_PRESENT(cmi
, cmi_mca_init
))
588 CMI_OPS(cmi
)->cmi_mca_init(hdl
);
/* Possible dispositions returned by cmi_mce_response(). */
#define CMI_RESPONSE_PANIC          0x0     /* panic must have value 0 */
#define CMI_RESPONSE_NONE           0x1
#define CMI_RESPONSE_CKILL          0x2
#define CMI_RESPONSE_REBOOT         0x3     /* not implemented */
#define CMI_RESPONSE_ONTRAP_PROT    0x4
#define CMI_RESPONSE_LOFAULT_PROT   0x5
599 * Return 0 if we will panic in response to this machine check, otherwise
600 * non-zero. If the caller is cmi_mca_trap in this file then the nonzero
601 * return values are to be interpreted from CMI_RESPONSE_* above.
603 * This function must just return what will be done without actually
604 * doing anything; this includes not changing the regs.
607 cmi_mce_response(struct regs
*rp
, uint64_t disp
)
609 int panicrsp
= cmi_panic_on_uncorrectable_error
? CMI_RESPONSE_PANIC
:
613 ASSERT(rp
!= NULL
); /* don't call for polling, only on #MC */
616 * If no bits are set in the disposition then there is nothing to
617 * worry about and we do not need to trampoline to ontrap or
621 return (CMI_RESPONSE_NONE
);
624 * Unconstrained errors cannot be forgiven, even by ontrap or
625 * lofault protection. The data is not poisoned and may not
626 * even belong to the trapped context - eg a writeback of
627 * data that is found to be bad.
629 if (disp
& CMI_ERRDISP_UC_UNCONSTRAINED
)
633 * ontrap OT_DATA_EC and lofault protection forgive any disposition
634 * other than unconstrained, even those normally forced fatal.
636 if ((otp
= curthread
->t_ontrap
) != NULL
&& otp
->ot_prot
& OT_DATA_EC
)
637 return (CMI_RESPONSE_ONTRAP_PROT
);
638 else if (curthread
->t_lofault
)
639 return (CMI_RESPONSE_LOFAULT_PROT
);
642 * Forced-fatal errors are terminal even in user mode.
644 if (disp
& CMI_ERRDISP_FORCEFATAL
)
648 * If the trapped context is corrupt or we have no instruction pointer
649 * to resume at (and aren't trampolining to a fault handler)
650 * then in the kernel case we must panic and in usermode we
651 * kill the affected contract.
653 if (disp
& (CMI_ERRDISP_CURCTXBAD
| CMI_ERRDISP_RIPV_INVALID
))
654 return (USERMODE(rp
->r_cs
) ? CMI_RESPONSE_CKILL
: panicrsp
);
657 * Anything else is harmless
659 return (CMI_RESPONSE_NONE
);
662 int cma_mca_trap_panic_suppressed
= 0;
667 if (cmi_panic_on_uncorrectable_error
) {
668 fm_panic("Unrecoverable Machine-Check Exception");
670 cmn_err(CE_WARN
, "suppressing panic from fatal #mc");
671 cma_mca_trap_panic_suppressed
++;
/* Debug counters for non-panic #MC dispositions taken in cmi_mca_trap() */
int cma_mca_trap_contract_kills = 0;
int cma_mca_trap_ontrap_forgiven = 0;
int cma_mca_trap_lofault_forgiven = 0;
681 * Native #MC handler - we branch to here from mcetrap
685 cmi_mca_trap(struct regs
*rp
)
687 cmi_hdl_t hdl
= NULL
;
692 if (cmi_no_mca_init
!= 0)
696 * This function can call cmn_err, and the cpu module cmi_mca_trap
697 * entry point may also elect to call cmn_err (e.g., if it can't
698 * log the error onto an errorq, say very early in boot).
699 * We need to let cprintf know that we must not block.
703 if ((hdl
= cmi_hdl_lookup(CMI_HDL_NATIVE
, cmi_ntv_hwchipid(CPU
),
704 cmi_ntv_hwcoreid(CPU
), cmi_ntv_hwstrandid(CPU
))) == NULL
||
705 (cmi
= HDL2CMI(hdl
)) == NULL
||
706 !CMI_OP_PRESENT(cmi
, cmi_mca_trap
)) {
708 cmn_err(CE_WARN
, "#MC exception on cpuid %d: %s",
710 hdl
? "handle lookup ok but no #MC handler found" :
711 "handle lookup failed");
720 disp
= CMI_OPS(cmi
)->cmi_mca_trap(hdl
, rp
);
722 switch (cmi_mce_response(rp
, disp
)) {
724 cmn_err(CE_WARN
, "Invalid response from cmi_mce_response");
727 case CMI_RESPONSE_PANIC
:
731 case CMI_RESPONSE_NONE
:
734 case CMI_RESPONSE_CKILL
:
735 ttolwp(curthread
)->lwp_pcb
.pcb_flags
|= ASYNC_HWERR
;
737 cma_mca_trap_contract_kills
++;
740 case CMI_RESPONSE_ONTRAP_PROT
: {
741 on_trap_data_t
*otp
= curthread
->t_ontrap
;
742 otp
->ot_trap
= OT_DATA_EC
;
743 rp
->r_pc
= otp
->ot_trampoline
;
744 cma_mca_trap_ontrap_forgiven
++;
748 case CMI_RESPONSE_LOFAULT_PROT
:
750 rp
->r_pc
= curthread
->t_lofault
;
751 cma_mca_trap_lofault_forgiven
++;
760 cmi_hdl_poke(cmi_hdl_t hdl
)
762 cmi_t
*cmi
= HDL2CMI(hdl
);
764 if (!CMI_OP_PRESENT(cmi
, cmi_hdl_poke
))
767 CMI_OPS(cmi
)->cmi_hdl_poke(hdl
);
773 cmi_hdl_t hdl
= NULL
;
776 if (cmi_no_mca_init
!= 0)
779 if ((hdl
= cmi_hdl_lookup(CMI_HDL_NATIVE
, cmi_ntv_hwchipid(CPU
),
780 cmi_ntv_hwcoreid(CPU
), cmi_ntv_hwstrandid(CPU
))) == NULL
||
781 (cmi
= HDL2CMI(hdl
)) == NULL
||
782 !CMI_OP_PRESENT(cmi
, cmi_cmci_trap
)) {
784 cmn_err(CE_WARN
, "CMCI interrupt on cpuid %d: %s",
786 hdl
? "handle lookup ok but no CMCI handler found" :
787 "handle lookup failed");
795 CMI_OPS(cmi
)->cmi_cmci_trap(hdl
);
801 cmi_mc_register(cmi_hdl_t hdl
, const cmi_mc_ops_t
*mcops
, void *mcdata
)
803 if (!cmi_no_mca_init
)
804 cmi_hdl_setmc(hdl
, mcops
, mcdata
);
808 cmi_mc_register_global(const cmi_mc_ops_t
*mcops
, void *mcdata
)
810 if (!cmi_no_mca_init
) {
811 if (cmi_mc_global_ops
!= NULL
|| cmi_mc_global_data
!= NULL
||
812 mcops
== NULL
|| mcops
->cmi_mc_patounum
== NULL
||
813 mcops
->cmi_mc_unumtopa
== NULL
) {
814 return (CMIERR_UNKNOWN
);
816 cmi_mc_global_data
= mcdata
;
817 cmi_mc_global_ops
= mcops
;
819 return (CMI_SUCCESS
);
823 cmi_mc_sw_memscrub_disable(void)
829 cmi_mc_patounum(uint64_t pa
, uint8_t valid_hi
, uint8_t valid_lo
, uint32_t synd
,
830 int syndtype
, mc_unum_t
*up
)
832 const struct cmi_mc_ops
*mcops
;
837 return (CMIERR_MC_ABSENT
);
839 if (cmi_mc_global_ops
!= NULL
) {
840 if (cmi_mc_global_ops
->cmi_mc_patounum
== NULL
)
841 return (CMIERR_MC_NOTSUP
);
842 return (cmi_mc_global_ops
->cmi_mc_patounum(cmi_mc_global_data
,
843 pa
, valid_hi
, valid_lo
, synd
, syndtype
, up
));
846 if ((hdl
= cmi_hdl_any()) == NULL
) /* short-term hold */
847 return (CMIERR_MC_ABSENT
);
849 if ((mcops
= cmi_hdl_getmcops(hdl
)) == NULL
||
850 mcops
->cmi_mc_patounum
== NULL
) {
852 return (CMIERR_MC_NOTSUP
);
855 rv
= mcops
->cmi_mc_patounum(cmi_hdl_getmcdata(hdl
), pa
, valid_hi
,
856 valid_lo
, synd
, syndtype
, up
);
864 cmi_mc_unumtopa(mc_unum_t
*up
, nvlist_t
*nvl
, uint64_t *pap
)
866 const struct cmi_mc_ops
*mcops
;
871 if (up
!= NULL
&& nvl
!= NULL
)
872 return (CMIERR_API
); /* convert from just one form */
875 return (CMIERR_MC_ABSENT
);
877 if (cmi_mc_global_ops
!= NULL
) {
878 if (cmi_mc_global_ops
->cmi_mc_unumtopa
== NULL
)
879 return (CMIERR_MC_NOTSUP
);
880 return (cmi_mc_global_ops
->cmi_mc_unumtopa(cmi_mc_global_data
,
884 if ((hdl
= cmi_hdl_any()) == NULL
) /* short-term hold */
885 return (CMIERR_MC_ABSENT
);
887 if ((mcops
= cmi_hdl_getmcops(hdl
)) == NULL
||
888 mcops
->cmi_mc_unumtopa
== NULL
) {
891 if (nvl
!= NULL
&& nvlist_lookup_nvlist(nvl
,
892 FM_FMRI_HC_SPECIFIC
, &hcsp
) == 0 &&
893 (nvlist_lookup_uint64(hcsp
,
894 "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR
, pap
) == 0 ||
895 nvlist_lookup_uint64(hcsp
, FM_FMRI_HC_SPECIFIC_PHYSADDR
,
897 return (CMIERR_MC_PARTIALUNUMTOPA
);
899 return (mcops
&& mcops
->cmi_mc_unumtopa
== NULL
?
900 CMIERR_MC_NOTSUP
: CMIERR_MC_ABSENT
);
904 rv
= mcops
->cmi_mc_unumtopa(cmi_hdl_getmcdata(hdl
), up
, nvl
, pap
);
912 cmi_mc_logout(cmi_hdl_t hdl
, boolean_t ismc
, boolean_t sync
)
914 const struct cmi_mc_ops
*mcops
;
919 if (cmi_mc_global_ops
!= NULL
)
920 mcops
= cmi_mc_global_ops
;
922 mcops
= cmi_hdl_getmcops(hdl
);
924 if (mcops
!= NULL
&& mcops
->cmi_mc_logout
!= NULL
)
925 mcops
->cmi_mc_logout(hdl
, ismc
, sync
);
929 cmi_hdl_msrinject(cmi_hdl_t hdl
, cmi_mca_regs_t
*regs
, uint_t nregs
,
932 cmi_t
*cmi
= cmi_hdl_getcmi(hdl
);
935 if (!CMI_OP_PRESENT(cmi
, cmi_msrinject
))
936 return (CMIERR_NOTSUP
);
938 cmi_hdl_inj_begin(hdl
);
939 rc
= CMI_OPS(cmi
)->cmi_msrinject(hdl
, regs
, nregs
, force
);
940 cmi_hdl_inj_end(hdl
);
946 cmi_panic_on_ue(void)
948 return (cmi_panic_on_uncorrectable_error
? B_TRUE
: B_FALSE
);
952 cmi_panic_callback(void)
957 if (cmi_no_mca_init
|| (hdl
= cmi_hdl_any()) == NULL
)
960 cmi
= cmi_hdl_getcmi(hdl
);
961 if (CMI_OP_PRESENT(cmi
, cmi_panic_callback
))
962 CMI_OPS(cmi
)->cmi_panic_callback();
969 cmi_hdl_chipident(cmi_hdl_t hdl
)
971 cmi_t
*cmi
= cmi_hdl_getcmi(hdl
);
973 if (!CMI_OP_PRESENT(cmi
, cmi_ident
))
976 return (CMI_OPS(cmi
)->cmi_ident(hdl
));