1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
31 * CPU Module Interface - hardware abstraction.
34 #ifdef __xpv
35 #include <sys/xpv_user.h>
36 #endif
38 #include <sys/types.h>
39 #include <sys/cpu_module.h>
40 #include <sys/kmem.h>
41 #include <sys/x86_archext.h>
42 #include <sys/cpuvar.h>
43 #include <sys/ksynch.h>
44 #include <sys/x_call.h>
45 #include <sys/pghw.h>
46 #include <sys/pci_cfgacc.h>
47 #include <sys/pci_cfgspace.h>
48 #include <sys/archsystm.h>
49 #include <sys/ontrap.h>
50 #include <sys/controlregs.h>
51 #include <sys/sunddi.h>
52 #include <sys/trap.h>
53 #include <sys/mca_x86.h>
54 #include <sys/processor.h>
55 #include <sys/cmn_err.h>
56 #include <sys/nvpair.h>
57 #include <sys/fm/util.h>
58 #include <sys/fm/protocol.h>
59 #include <sys/fm/smb/fmsmb.h>
60 #include <sys/cpu_module_impl.h>
63  * Variable which determines if the SMBIOS supports x86 generic topology, or
64  * if legacy topology enumeration will occur.
66 extern int x86gentopo_legacy;
69 * Outside of this file consumers use the opaque cmi_hdl_t. This
70 * definition is duplicated in the generic_cpu mdb module, so keep
71 * them in-sync when making changes.
73 typedef struct cmi_hdl_impl {
74 enum cmi_hdl_class cmih_class; /* Handle nature */
75 const struct cmi_hdl_ops *cmih_ops; /* Operations vector */
76 uint_t cmih_chipid; /* Chipid of cpu resource */
77 uint_t cmih_procnodeid; /* Nodeid of cpu resource */
78 uint_t cmih_coreid; /* Core within die */
79 uint_t cmih_strandid; /* Thread within core */
80 uint_t cmih_procnodes_per_pkg; /* Nodes in a processor */
81 boolean_t cmih_mstrand; /* cores are multithreaded */
82 volatile uint32_t *cmih_refcntp; /* Reference count pointer */
83 uint64_t cmih_msrsrc; /* MSR data source flags */
84 void *cmih_hdlpriv; /* cmi_hw.c private data */
85 void *cmih_spec; /* cmi_hdl_{set,get}_specific */
86 void *cmih_cmi; /* cpu mod control structure */
87 void *cmih_cmidata; /* cpu mod private data */
88 const struct cmi_mc_ops *cmih_mcops; /* Memory-controller ops */
89 void *cmih_mcdata; /* Memory-controller data */
90 uint64_t cmih_flags; /* See CMIH_F_* below */
91 uint16_t cmih_smbiosid; /* SMBIOS Type 4 struct ID */
92 uint_t cmih_smb_chipid; /* SMBIOS factored chipid */
93 nvlist_t *cmih_smb_bboard; /* SMBIOS bboard nvlist */
94 } cmi_hdl_impl_t;
96 #define IMPLHDL(ophdl) ((cmi_hdl_impl_t *)ophdl)
97 #define HDLOPS(hdl) ((hdl)->cmih_ops)
99 #define CMIH_F_INJACTV 0x1ULL
100 #define CMIH_F_DEAD 0x2ULL
103 * Ops structure for handle operations.
105 struct cmi_hdl_ops {
107 * These ops are required in an implementation.
109 uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
110 const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
111 uint_t (*cmio_family)(cmi_hdl_impl_t *);
112 uint_t (*cmio_model)(cmi_hdl_impl_t *);
113 uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
114 uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
115 uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
116 uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
117 uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
118 uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
119 uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
120 uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
121 const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
122 uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
123 const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
125 id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
127 * These ops are optional in an implementation.
129 ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
130 void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
131 cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
132 cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
133 cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
134 void (*cmio_int)(cmi_hdl_impl_t *, int);
135 int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
136 uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
137 uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
138 nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
141 static const struct cmi_hdl_ops cmi_hdl_ops;
144 * Handles are looked up from contexts such as polling, injection etc
145 * where the context is reasonably well defined (although a poller could
146 * interrupt any old thread holding any old lock). They are also looked
147 * up by machine check handlers, which may strike at inconvenient times
148 * such as during handle initialization or destruction or during handle
149 * lookup (which the #MC handler itself will also have to perform).
151 * So keeping handles in a linked list makes locking difficult when we
152 * consider #MC handlers. Our solution is to have a look-up table indexed
153 * by that which uniquely identifies a handle - chip/core/strand id -
154 * with each entry a structure including a pointer to a handle
155 * structure for the resource, and a reference count for the handle.
156 * Reference counts are modified atomically. The public cmi_hdl_hold
157 * always succeeds because this can only be used after handle creation
158 * and before the call to destruct, so the hold count is already at least one.
159 * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
160  * we must be certain that the count has not already been decremented to zero
161 * before applying our hold.
163  * The table is an array of CMI_CHIPID_ARR_SZ entries (the maximum number
164  * of chips), indexed by chip id; if a chip is not present its entry is
165  * NULL. Each entry points to another array listing all strands of that
166  * chip. This per-chip table is allocated lazily, the first time we want
167  * to populate an entry for the chip; its size is CMI_MAX_STRANDS_PER_CHIP
168  * and it is populated as each of the chip's cpus starts.
170  * Ideally we would allocate only for the actual number of chips, cores per
171  * chip and strands per core. The number of chips is not known until all
172  * of them have been passed in; the numbers of cores and strands are only
173  * partially available. For now we stick with the above approach.
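/*
 * As an illustrative sketch (not code taken from this file), a consumer
 * wanting to read an MSR on a given <chipid, coreid, strandid> - values
 * assumed to have been determined by the caller - would typically look
 * up the handle, use it and drop its reference:
 *
 *	cmi_hdl_t hdl;
 *	uint64_t val;
 *
 *	if ((hdl = cmi_hdl_lookup(CMI_HDL_NEUTRAL, chipid, coreid,
 *	    strandid)) != NULL) {
 *		if (cmi_hdl_rdmsr(hdl, msrnum, &val) == CMI_SUCCESS) {
 *			... use val ...
 *		}
 *		cmi_hdl_rele(hdl);
 *	}
 *
 * The lookup itself applies a hold via the reference count described
 * above, so the handle cannot be destroyed while it is in use.
 */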
175 #define CMI_MAX_CHIPID_NBITS 6 /* max chipid of 63 */
176 #define CMI_MAX_CORES_PER_CHIP_NBITS 4 /* 16 cores per chip max */
177 #define CMI_MAX_STRANDS_PER_CORE_NBITS 3 /* 8 strands per core max */
179 #define CMI_MAX_CHIPID ((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
180 #define CMI_MAX_CORES_PER_CHIP(cbits) (1 << (cbits))
181 #define CMI_MAX_COREID(cbits) ((1 << (cbits)) - 1)
182 #define CMI_MAX_STRANDS_PER_CORE(sbits) (1 << (sbits))
183 #define CMI_MAX_STRANDID(sbits) ((1 << (sbits)) - 1)
184 #define CMI_MAX_STRANDS_PER_CHIP(cbits, sbits) \
185 (CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))
187 #define CMI_CHIPID_ARR_SZ (1 << CMI_MAX_CHIPID_NBITS)
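/*
 * For illustration, with the default values above (6 chipid bits, 4 core
 * bits, 3 strand bits) the macros evaluate as follows:
 *
 *	CMI_MAX_CHIPID				63
 *	CMI_MAX_CORES_PER_CHIP(4)		16
 *	CMI_MAX_STRANDS_PER_CORE(3)		8
 *	CMI_MAX_STRANDS_PER_CHIP(4, 3)		128 entries per chip table
 *	CMI_CHIPID_ARR_SZ			64 first-level entries
 */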
189 typedef struct cmi_hdl_ent {
190 volatile uint32_t cmae_refcnt;
191 cmi_hdl_impl_t *cmae_hdlp;
192 } cmi_hdl_ent_t;
194 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
197 * Default values for the number of core and strand bits.
199 uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
200 uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
201 static int cmi_ext_topo_check = 0;
204 * Controls where we will source PCI config space data.
206 #define CMI_PCICFG_FLAG_RD_HWOK 0x0001
207 #define CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
208 #define CMI_PCICFG_FLAG_WR_HWOK 0x0004
209 #define CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008
211 static uint64_t cmi_pcicfg_flags =
212 CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
213 CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
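/*
 * Clearing the _HWOK bits via cmi_pcird_nohw()/cmi_pciwr_nohw() below
 * confines config space reads/writes to the interposition hash. A sketch
 * (bus/dev/func/reg/ip/val assumed to be set up by the caller):
 *
 *	cmi_pcird_nohw();
 *	val = cmi_pci_getl(bus, dev, func, reg, &ip, NULL);
 *
 * returns a matching interposed value if one exists, otherwise 0,
 * without touching the hardware.
 */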
216 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
218 #define CMI_MSR_FLAG_RD_HWOK 0x0001
219 #define CMI_MSR_FLAG_RD_INTERPOSEOK 0x0002
220 #define CMI_MSR_FLAG_WR_HWOK 0x0004
221 #define CMI_MSR_FLAG_WR_INTERPOSEOK 0x0008
223 int cmi_call_func_ntv_tries = 3;
225 static cmi_errno_t
226 call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
228 cmi_errno_t rc = -1;
229 int i;
231 kpreempt_disable();
233 if (CPU->cpu_id == cpuid) {
234 (*func)(arg1, arg2, (xc_arg_t)&rc);
235 } else {
237 * This should not happen for a #MC trap or a poll, so
238 * this is likely an error injection or similar.
239  * We will try to cross call with xc_priority - we
240 * can't guarantee success with xc_call because
241 * the interrupt code in the case of a #MC may
242 * already hold the xc mutex.
244 for (i = 0; i < cmi_call_func_ntv_tries; i++) {
245 cpuset_t cpus;
247 CPUSET_ONLY(cpus, cpuid);
248 xc_priority(arg1, arg2, (xc_arg_t)&rc,
249 CPUSET2BV(cpus), func);
250 if (rc != -1)
251 break;
253 DELAY(1);
257 kpreempt_enable();
259 return (rc != -1 ? rc : CMIERR_DEADLOCK);
262 static uint64_t injcnt;
264 void
265 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
267 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
269 if (hdl != NULL)
270 hdl->cmih_flags |= CMIH_F_INJACTV;
271 if (injcnt++ == 0) {
272 cmn_err(CE_NOTE, "Hardware error injection/simulation "
273 "activity noted");
277 void
278 cmi_hdl_inj_end(cmi_hdl_t ophdl)
280 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
282 ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
283 if (hdl != NULL)
284 hdl->cmih_flags &= ~CMIH_F_INJACTV;
287 boolean_t
288 cmi_inj_tainted(void)
290 return (injcnt != 0 ? B_TRUE : B_FALSE);
294 * =======================================================
295 * | MSR Interposition |
296 * | ----------------- |
297 * | |
298 * -------------------------------------------------------
301 #define CMI_MSRI_HASHSZ 16
302 #define CMI_MSRI_HASHIDX(hdl, msr) \
303 	((((uintptr_t)(hdl) >> 3) + (msr)) % (CMI_MSRI_HASHSZ - 1))
305 struct cmi_msri_bkt {
306 kmutex_t msrib_lock;
307 struct cmi_msri_hashent *msrib_head;
310 struct cmi_msri_hashent {
311 struct cmi_msri_hashent *msrie_next;
312 struct cmi_msri_hashent *msrie_prev;
313 cmi_hdl_impl_t *msrie_hdl;
314 uint_t msrie_msrnum;
315 uint64_t msrie_msrval;
318 #define CMI_MSRI_MATCH(ent, hdl, req_msr) \
319 ((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
321 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
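/*
 * Illustrative sketch (not code from this file): an injector stashes a
 * value with cmi_hdl_msrinterpose() and a subsequent cmi_hdl_rdmsr() on
 * the same handle is then satisfied from this hash rather than from the
 * hardware (assuming the default CMI_MSR_FLAG_RD_INTERPOSEOK source
 * flag); hdl, msrnum and val are assumed to come from the caller:
 *
 *	cmi_mca_regs_t reg;
 *	uint64_t rdval;
 *
 *	reg.cmr_msrnum = msrnum;
 *	reg.cmr_msrval = val;
 *	cmi_hdl_msrinterpose(hdl, &reg, 1);
 *	(void) cmi_hdl_rdmsr(hdl, msrnum, &rdval);
 *
 * after which rdval holds the interposed val, served by msri_lookup().
 */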
323 static void
324 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
326 int idx = CMI_MSRI_HASHIDX(hdl, msr);
327 struct cmi_msri_bkt *hbp = &msrihash[idx];
328 struct cmi_msri_hashent *hep;
330 mutex_enter(&hbp->msrib_lock);
332 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
333 if (CMI_MSRI_MATCH(hep, hdl, msr))
334 break;
337 if (hep != NULL) {
338 hep->msrie_msrval = val;
339 } else {
340 hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
341 hep->msrie_hdl = hdl;
342 hep->msrie_msrnum = msr;
343 hep->msrie_msrval = val;
345 if (hbp->msrib_head != NULL)
346 hbp->msrib_head->msrie_prev = hep;
347 hep->msrie_next = hbp->msrib_head;
348 hep->msrie_prev = NULL;
349 hbp->msrib_head = hep;
352 mutex_exit(&hbp->msrib_lock);
356  * Look for a match for the given handle and msr. Return 1 with valp
357 * filled if a match is found, otherwise return 0 with valp untouched.
359 static int
360 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
362 int idx = CMI_MSRI_HASHIDX(hdl, msr);
363 struct cmi_msri_bkt *hbp = &msrihash[idx];
364 struct cmi_msri_hashent *hep;
367 * This function is called during #MC trap handling, so we should
368 * consider the possibility that the hash mutex is held by the
369 * interrupted thread. This should not happen because interposition
370 * is an artificial injection mechanism and the #MC is requested
371 * after adding entries, but just in case of a real #MC at an
372 * unlucky moment we'll use mutex_tryenter here.
374 if (!mutex_tryenter(&hbp->msrib_lock))
375 return (0);
377 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
378 if (CMI_MSRI_MATCH(hep, hdl, msr)) {
379 *valp = hep->msrie_msrval;
380 break;
384 mutex_exit(&hbp->msrib_lock);
386 return (hep != NULL);
390 * Remove any interposed value that matches.
392 static void
393 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
396 int idx = CMI_MSRI_HASHIDX(hdl, msr);
397 struct cmi_msri_bkt *hbp = &msrihash[idx];
398 struct cmi_msri_hashent *hep;
400 if (!mutex_tryenter(&hbp->msrib_lock))
401 return;
403 for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
404 if (CMI_MSRI_MATCH(hep, hdl, msr)) {
405 if (hep->msrie_prev != NULL)
406 hep->msrie_prev->msrie_next = hep->msrie_next;
408 if (hep->msrie_next != NULL)
409 hep->msrie_next->msrie_prev = hep->msrie_prev;
411 if (hbp->msrib_head == hep)
412 hbp->msrib_head = hep->msrie_next;
414 kmem_free(hep, sizeof (*hep));
415 break;
419 mutex_exit(&hbp->msrib_lock);
423 * =======================================================
424 * | PCI Config Space Interposition |
425 * | ------------------------------ |
426 * | |
427 * -------------------------------------------------------
431  * Hash for interposed PCI config space values. We look up on
432  * bus/dev/func/offset and then record whether the value stashed was made
433  * with a byte, word or doubleword access; we will only return a hit for
434  * an access of the same size. If you access, say, a 32-bit register using
435  * byte accesses and then attempt to read the full 32-bit value back, you
436  * will not obtain any sort of merged result - you get a lookup miss.
439 #define CMI_PCII_HASHSZ 16
440 #define CMI_PCII_HASHIDX(b, d, f, o) \
441 (((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
443 struct cmi_pcii_bkt {
444 kmutex_t pciib_lock;
445 struct cmi_pcii_hashent *pciib_head;
448 struct cmi_pcii_hashent {
449 struct cmi_pcii_hashent *pcii_next;
450 struct cmi_pcii_hashent *pcii_prev;
451 int pcii_bus;
452 int pcii_dev;
453 int pcii_func;
454 int pcii_reg;
455 int pcii_asize;
456 uint32_t pcii_val;
459 #define CMI_PCII_MATCH(ent, b, d, f, r, asz) \
460 ((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
461 (ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
462 (ent)->pcii_asize == (asz))
464 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
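/*
 * For example (an illustrative sketch, not code from this file), the
 * access-size matching described above means a value interposed with a
 * 32-bit access is only returned to a 32-bit read; a byte read of the
 * same register misses the hash:
 *
 *	cmi_pci_interposel(bus, dev, func, reg, 0x12345678);
 *	val32 = cmi_pci_getl(bus, dev, func, reg, &ip, NULL);
 *		ip == 1, val32 == 0x12345678
 *	val8 = cmi_pci_getb(bus, dev, func, reg, &ip, NULL);
 *		ip == 0, lookup miss - no merged or partial result
 */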
468 * Add a new entry to the PCI interpose hash, overwriting any existing
469 * entry that is found.
471 static void
472 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
474 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
475 struct cmi_pcii_bkt *hbp = &pciihash[idx];
476 struct cmi_pcii_hashent *hep;
478 cmi_hdl_inj_begin(NULL);
480 mutex_enter(&hbp->pciib_lock);
482 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
483 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
484 break;
487 if (hep != NULL) {
488 hep->pcii_val = val;
489 } else {
490 hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
491 hep->pcii_bus = bus;
492 hep->pcii_dev = dev;
493 hep->pcii_func = func;
494 hep->pcii_reg = reg;
495 hep->pcii_asize = asz;
496 hep->pcii_val = val;
498 if (hbp->pciib_head != NULL)
499 hbp->pciib_head->pcii_prev = hep;
500 hep->pcii_next = hbp->pciib_head;
501 hep->pcii_prev = NULL;
502 hbp->pciib_head = hep;
505 mutex_exit(&hbp->pciib_lock);
507 cmi_hdl_inj_end(NULL);
511 * Look for a match for the given bus/dev/func/reg; return 1 with valp
512 * filled if a match is found, otherwise return 0 with valp untouched.
514 static int
515 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
517 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
518 struct cmi_pcii_bkt *hbp = &pciihash[idx];
519 struct cmi_pcii_hashent *hep;
521 if (!mutex_tryenter(&hbp->pciib_lock))
522 return (0);
524 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
525 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
526 *valp = hep->pcii_val;
527 break;
531 mutex_exit(&hbp->pciib_lock);
533 return (hep != NULL);
536 static void
537 pcii_rment(int bus, int dev, int func, int reg, int asz)
539 int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
540 struct cmi_pcii_bkt *hbp = &pciihash[idx];
541 struct cmi_pcii_hashent *hep;
543 mutex_enter(&hbp->pciib_lock);
545 for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
546 if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
547 if (hep->pcii_prev != NULL)
548 hep->pcii_prev->pcii_next = hep->pcii_next;
550 if (hep->pcii_next != NULL)
551 hep->pcii_next->pcii_prev = hep->pcii_prev;
553 if (hbp->pciib_head == hep)
554 hbp->pciib_head = hep->pcii_next;
556 kmem_free(hep, sizeof (*hep));
557 break;
561 mutex_exit(&hbp->pciib_lock);
564 #ifndef __xpv
567 * =======================================================
568 * | Native methods |
569 * | -------------- |
570 * | |
571 * | These are used when we are running native on bare- |
572 * | metal, or simply don't know any better. |
573 * ---------------------------------------------------------
576 #define HDLPRIV(hdl) ((cpu_t *)(hdl)->cmih_hdlpriv)
578 static uint_t
579 ntv_vendor(cmi_hdl_impl_t *hdl)
581 return (cpuid_getvendor(HDLPRIV(hdl)));
584 static const char *
585 ntv_vendorstr(cmi_hdl_impl_t *hdl)
587 return (cpuid_getvendorstr(HDLPRIV(hdl)));
590 static uint_t
591 ntv_family(cmi_hdl_impl_t *hdl)
593 return (cpuid_getfamily(HDLPRIV(hdl)));
596 static uint_t
597 ntv_model(cmi_hdl_impl_t *hdl)
599 return (cpuid_getmodel(HDLPRIV(hdl)));
602 static uint_t
603 ntv_stepping(cmi_hdl_impl_t *hdl)
605 return (cpuid_getstep(HDLPRIV(hdl)));
608 static uint_t
609 ntv_chipid(cmi_hdl_impl_t *hdl)
611 return (hdl->cmih_chipid);
615 static uint_t
616 ntv_procnodeid(cmi_hdl_impl_t *hdl)
618 return (hdl->cmih_procnodeid);
621 static uint_t
622 ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
624 return (hdl->cmih_procnodes_per_pkg);
627 static uint_t
628 ntv_coreid(cmi_hdl_impl_t *hdl)
630 return (hdl->cmih_coreid);
633 static uint_t
634 ntv_strandid(cmi_hdl_impl_t *hdl)
636 return (hdl->cmih_strandid);
639 static uint_t
640 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
642 return (cpuid_get_apicid(HDLPRIV(hdl)));
645 static uint16_t
646 ntv_smbiosid(cmi_hdl_impl_t *hdl)
648 return (hdl->cmih_smbiosid);
651 static uint_t
652 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
654 return (hdl->cmih_smb_chipid);
657 static nvlist_t *
658 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
660 return (hdl->cmih_smb_bboard);
663 static uint32_t
664 ntv_chiprev(cmi_hdl_impl_t *hdl)
666 return (cpuid_getchiprev(HDLPRIV(hdl)));
669 static const char *
670 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
672 return (cpuid_getchiprevstr(HDLPRIV(hdl)));
675 static uint32_t
676 ntv_getsockettype(cmi_hdl_impl_t *hdl)
678 return (cpuid_getsockettype(HDLPRIV(hdl)));
681 static const char *
682 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
684 return (cpuid_getsocketstr(HDLPRIV(hdl)));
687 static id_t
688 ntv_logical_id(cmi_hdl_impl_t *hdl)
690 return (HDLPRIV(hdl)->cpu_id);
693 /*ARGSUSED*/
694 static int
695 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
697 ulong_t *dest = (ulong_t *)arg1;
698 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
700 *dest = getcr4();
701 *rcp = CMI_SUCCESS;
703 return (0);
706 static ulong_t
707 ntv_getcr4(cmi_hdl_impl_t *hdl)
709 cpu_t *cp = HDLPRIV(hdl);
710 ulong_t val;
712 (void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
714 return (val);
717 /*ARGSUSED*/
718 static int
719 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
721 ulong_t val = (ulong_t)arg1;
722 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
724 setcr4(val);
725 *rcp = CMI_SUCCESS;
727 return (0);
730 static void
731 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
733 cpu_t *cp = HDLPRIV(hdl);
735 (void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
738 volatile uint32_t cmi_trapped_rdmsr;
740 /*ARGSUSED*/
741 static int
742 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
744 uint_t msr = (uint_t)arg1;
745 uint64_t *valp = (uint64_t *)arg2;
746 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
748 on_trap_data_t otd;
750 if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
751 if (checked_rdmsr(msr, valp) == 0)
752 *rcp = CMI_SUCCESS;
753 else
754 *rcp = CMIERR_NOTSUP;
755 } else {
756 *rcp = CMIERR_MSRGPF;
757 atomic_inc_32(&cmi_trapped_rdmsr);
759 no_trap();
761 return (0);
764 static cmi_errno_t
765 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
767 cpu_t *cp = HDLPRIV(hdl);
769 if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
770 return (CMIERR_INTERPOSE);
772 return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
773 (xc_arg_t)msr, (xc_arg_t)valp));
776 volatile uint32_t cmi_trapped_wrmsr;
778 /*ARGSUSED*/
779 static int
780 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
782 uint_t msr = (uint_t)arg1;
783 uint64_t val = *((uint64_t *)arg2);
784 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
785 on_trap_data_t otd;
787 if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
788 if (checked_wrmsr(msr, val) == 0)
789 *rcp = CMI_SUCCESS;
790 else
791 *rcp = CMIERR_NOTSUP;
792 } else {
793 *rcp = CMIERR_MSRGPF;
794 atomic_inc_32(&cmi_trapped_wrmsr);
796 no_trap();
798 return (0);
802 static cmi_errno_t
803 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
805 cpu_t *cp = HDLPRIV(hdl);
807 if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
808 return (CMI_SUCCESS);
810 return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
811 (xc_arg_t)msr, (xc_arg_t)&val));
814 static cmi_errno_t
815 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
817 msri_addent(hdl, msr, val);
818 return (CMI_SUCCESS);
821 /*ARGSUSED*/
822 static int
823 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
825 cmi_errno_t *rcp = (cmi_errno_t *)arg3;
826 int int_no = (int)arg1;
828 if (int_no == T_MCE)
829 int18();
830 else
831 int_cmci();
832 *rcp = CMI_SUCCESS;
834 return (0);
837 static void
838 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
840 cpu_t *cp = HDLPRIV(hdl);
842 (void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
845 static int
846 ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
848 int rc;
849 processorid_t cpuid = HDLPRIV(hdl)->cpu_id;
851 while (mutex_tryenter(&cpu_lock) == 0) {
852 if (hdl->cmih_flags & CMIH_F_DEAD)
853 return (EBUSY);
854 delay(1);
856 rc = p_online_internal_locked(cpuid, new_status, old_status);
857 mutex_exit(&cpu_lock);
859 return (rc);
862 #else /* __xpv */
865 * =======================================================
866 * | xVM dom0 methods |
867 * | ---------------- |
868 * | |
869 * | These are used when we are running as dom0 in |
870 * | a Solaris xVM context. |
871 * ---------------------------------------------------------
874 #define HDLPRIV(hdl) ((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
876 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
879 static uint_t
880 xpv_vendor(cmi_hdl_impl_t *hdl)
882 return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
883 HDLPRIV(hdl))));
886 static const char *
887 xpv_vendorstr(cmi_hdl_impl_t *hdl)
889 return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
892 static uint_t
893 xpv_family(cmi_hdl_impl_t *hdl)
895 return (xen_physcpu_family(HDLPRIV(hdl)));
898 static uint_t
899 xpv_model(cmi_hdl_impl_t *hdl)
901 return (xen_physcpu_model(HDLPRIV(hdl)));
904 static uint_t
905 xpv_stepping(cmi_hdl_impl_t *hdl)
907 return (xen_physcpu_stepping(HDLPRIV(hdl)));
910 static uint_t
911 xpv_chipid(cmi_hdl_impl_t *hdl)
913 return (hdl->cmih_chipid);
916 static uint_t
917 xpv_procnodeid(cmi_hdl_impl_t *hdl)
919 return (hdl->cmih_procnodeid);
922 static uint_t
923 xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
925 return (hdl->cmih_procnodes_per_pkg);
928 static uint_t
929 xpv_coreid(cmi_hdl_impl_t *hdl)
931 return (hdl->cmih_coreid);
934 static uint_t
935 xpv_strandid(cmi_hdl_impl_t *hdl)
937 return (hdl->cmih_strandid);
940 static uint_t
941 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
943 return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
946 static uint16_t
947 xpv_smbiosid(cmi_hdl_impl_t *hdl)
949 return (hdl->cmih_smbiosid);
952 static uint_t
953 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
955 return (hdl->cmih_smb_chipid);
958 static nvlist_t *
959 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
961 return (hdl->cmih_smb_bboard);
964 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
966 static uint32_t
967 xpv_chiprev(cmi_hdl_impl_t *hdl)
969 return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
970 xpv_model(hdl), xpv_stepping(hdl)));
973 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
975 static const char *
976 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
978 return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
979 xpv_model(hdl), xpv_stepping(hdl)));
982 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
984 static uint32_t
985 xpv_getsockettype(cmi_hdl_impl_t *hdl)
987 return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
988 xpv_model(hdl), xpv_stepping(hdl)));
991 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
993 static const char *
994 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
996 return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
997 xpv_model(hdl), xpv_stepping(hdl)));
1000 static id_t
1001 xpv_logical_id(cmi_hdl_impl_t *hdl)
1003 return (xen_physcpu_logical_id(HDLPRIV(hdl)));
1006 static cmi_errno_t
1007 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1009 switch (msr) {
1010 case IA32_MSR_MCG_CAP:
1011 *valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1012 break;
1014 default:
1015 return (CMIERR_NOTSUP);
1018 return (CMI_SUCCESS);
1022 * Request the hypervisor to write an MSR for us. The hypervisor
1023 * will only accept MCA-related MSRs, as this is for MCA error
1024 * simulation purposes alone. We will pre-screen MSRs for injection
1025 * so we don't bother the HV with bogus requests. We will permit
1026 * injection to any MCA bank register, and to MCG_STATUS.
1029 #define IS_MCA_INJ_MSR(msr) \
1030 (((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1031 (msr) == IA32_MSR_MCG_STATUS)
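/*
 * For illustration, evaluating the macro above (not an exhaustive list):
 *
 *	IS_MCA_INJ_MSR(IA32_MSR_MC(1, STATUS))	accepted
 *	IS_MCA_INJ_MSR(IA32_MSR_MCG_STATUS)	accepted
 *	IS_MCA_INJ_MSR(IA32_MSR_MCG_CAP)	rejected - CMIERR_API below
 */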
1033 static cmi_errno_t
1034 xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
1036 xen_mc_t xmc;
1037 struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;
1039 if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1040 return (CMIERR_NOTSUP); /* for injection use only! */
1042 if (!IS_MCA_INJ_MSR(msr))
1043 return (CMIERR_API);
1045 if (panicstr)
1046 return (CMIERR_DEADLOCK);
1048 mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1049 mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
1050 mci->mcinj_count = 1; /* learn to batch sometime */
1051 mci->mcinj_msr[0].reg = msr;
1052 mci->mcinj_msr[0].value = val;
1054 return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
1055 0 ? CMI_SUCCESS : CMIERR_NOTSUP);
1058 static cmi_errno_t
1059 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1061 return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1065 static cmi_errno_t
1066 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1068 return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1071 static void
1072 xpv_int(cmi_hdl_impl_t *hdl, int int_no)
1074 xen_mc_t xmc;
1075 struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;
1077 if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1078 return;
1080 if (int_no != T_MCE) {
1081 cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
1082 int_no);
1085 mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1087 (void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
1090 static int
1091 xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
1093 xen_sysctl_t xs;
1094 int op, rc, status;
1096 new_status &= ~P_FORCED;
1098 switch (new_status) {
1099 case P_STATUS:
1100 op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
1101 break;
1102 case P_FAULTED:
1103 case P_OFFLINE:
1104 op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
1105 break;
1106 case P_ONLINE:
1107 op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
1108 break;
1109 default:
1110 return (-1);
1113 xs.cmd = XEN_SYSCTL_cpu_hotplug;
1114 xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1115 xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
1116 xs.u.cpu_hotplug.op = op;
1118 if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
1119 status = rc;
1120 rc = 0;
1121 switch (status) {
1122 case XEN_CPU_HOTPLUG_STATUS_NEW:
1123 *old_status = P_OFFLINE;
1124 break;
1125 case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
1126 *old_status = P_FAULTED;
1127 break;
1128 case XEN_CPU_HOTPLUG_STATUS_ONLINE:
1129 *old_status = P_ONLINE;
1130 break;
1131 default:
1132 return (-1);
1136 return (-rc);
1139 #endif
1141 /*ARGSUSED*/
1142 static void *
1143 cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1144 uint_t strandid)
1146 #ifdef __xpv
1147 xen_mc_lcpu_cookie_t cpi;
1149 for (cpi = xen_physcpu_next(NULL); cpi != NULL;
1150 cpi = xen_physcpu_next(cpi)) {
1151 if (xen_physcpu_chipid(cpi) == chipid &&
1152 xen_physcpu_coreid(cpi) == coreid &&
1153 xen_physcpu_strandid(cpi) == strandid)
1154 return ((void *)cpi);
1156 return (NULL);
1158 #else /* __xpv */
1160 cpu_t *cp, *startcp;
1162 kpreempt_disable();
1163 cp = startcp = CPU;
1164 do {
1165 if (cmi_ntv_hwchipid(cp) == chipid &&
1166 cmi_ntv_hwcoreid(cp) == coreid &&
1167 cmi_ntv_hwstrandid(cp) == strandid) {
1168 kpreempt_enable();
1169 return ((void *)cp);
1172 cp = cp->cpu_next;
1173 } while (cp != startcp);
1174 kpreempt_enable();
1175 return (NULL);
1176 #endif /* __ xpv */
1179 static boolean_t
1180 cpu_is_cmt(void *priv)
1182 #ifdef __xpv
1183 return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1184 #else /* __xpv */
1185 cpu_t *cp = (cpu_t *)priv;
1187 int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1188 cpuid_get_ncore_per_chip(cp);
1190 return (strands_per_core > 1);
1191 #endif /* __xpv */
1195 * Find the handle entry of a given cpu identified by a <chip,core,strand>
1196 * tuple.
1198 static cmi_hdl_ent_t *
1199 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1201 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1202 cmi_strand_nbits);
1205 * Allocate per-chip table which contains a list of handle of
1206 * all strands of the chip.
1208 if (cmi_chip_tab[chipid] == NULL) {
1209 size_t sz;
1210 cmi_hdl_ent_t *pg;
1212 sz = max_strands * sizeof (cmi_hdl_ent_t);
1213 pg = kmem_zalloc(sz, KM_SLEEP);
1215 /* test and set the per-chip table if it is not allocated */
1216 if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1217 			kmem_free(pg, sz); /* someone beat us to it */
1220 return (cmi_chip_tab[chipid] +
1221 ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
1222 ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
1225 extern void cpuid_get_ext_topo(uint_t, uint_t *, uint_t *);
1227 cmi_hdl_t
1228 cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1229 uint_t strandid)
1231 cmi_hdl_impl_t *hdl;
1232 void *priv;
1233 cmi_hdl_ent_t *ent;
1234 uint_t vendor;
1236 #ifdef __xpv
1237 ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
1238 #else
1239 ASSERT(class == CMI_HDL_NATIVE);
1240 #endif
1242 if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
1243 return (NULL);
1246 * Assume all chips in the system are the same type.
1247  * For Intel, attempt to check whether extended topology is available
1248  * via CPUID.EAX=0xB. If so, get the number of core and strand bits.
1250 #ifdef __xpv
1251 vendor = _cpuid_vendorstr_to_vendorcode(
1252 (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
1253 #else
1254 vendor = cpuid_getvendor((cpu_t *)priv);
1255 #endif
1256 if (vendor == X86_VENDOR_Intel && cmi_ext_topo_check == 0) {
1257 cpuid_get_ext_topo(vendor, &cmi_core_nbits, &cmi_strand_nbits);
1258 cmi_ext_topo_check = 1;
1261 if (chipid > CMI_MAX_CHIPID ||
1262 coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1263 strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1264 return (NULL);
1266 hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);
1268 hdl->cmih_class = class;
1269 HDLOPS(hdl) = &cmi_hdl_ops;
1270 hdl->cmih_chipid = chipid;
1271 hdl->cmih_coreid = coreid;
1272 hdl->cmih_strandid = strandid;
1273 hdl->cmih_mstrand = cpu_is_cmt(priv);
1274 hdl->cmih_hdlpriv = priv;
1275 #ifdef __xpv
1276 hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
1277 CMI_MSR_FLAG_WR_INTERPOSEOK;
1280 * XXX: need hypervisor support for procnodeid, for now assume
1281 * single-node processors (procnodeid = chipid)
1283 hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
1284 hdl->cmih_procnodes_per_pkg = 1;
1285 #else /* __xpv */
1286 hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
1287 CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
1288 hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
1289 hdl->cmih_procnodes_per_pkg =
1290 cpuid_get_procnodes_per_pkg((cpu_t *)priv);
1291 #endif /* __xpv */
1293 ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1294 if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
1296 * Somehow this (chipid, coreid, strandid) id tuple has
1297 * already been assigned! This indicates that the
1298 		 * caller's logic in determining these values is busted,
1299 * or perhaps undermined by bad BIOS setup. Complain,
1300 * and refuse to initialize this tuple again as bad things
1301 * will happen.
1303 cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
1304 "strandid %d handle already allocated!",
1305 chipid, coreid, strandid);
1306 kmem_free(hdl, sizeof (*hdl));
1307 return (NULL);
1311 * Once we store a nonzero reference count others can find this
1312 * handle via cmi_hdl_lookup etc. This initial hold on the handle
1313 * is to be dropped only if some other part of cmi initialization
1314 	 * fails or, if it succeeds, at later cpu deconfigure. Note that
1315 * the module private data we hold in cmih_cmi and cmih_cmidata
1316 * is still NULL at this point (the caller will fill it with
1317 * cmi_hdl_setcmi if it initializes) so consumers of handles
1318 * should always be ready for that possibility.
1320 ent->cmae_hdlp = hdl;
1321 hdl->cmih_refcntp = &ent->cmae_refcnt;
1322 ent->cmae_refcnt = 1;
1324 return ((cmi_hdl_t)hdl);
1327 void
1328 cmi_read_smbios(cmi_hdl_t ophdl)
1331 uint_t strand_apicid = UINT_MAX;
1332 uint_t chip_inst = UINT_MAX;
1333 uint16_t smb_id = USHRT_MAX;
1334 int rc = 0;
1336 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1338 /* set x86gentopo compatibility */
1339 fm_smb_fmacompat();
1341 #ifndef __xpv
1342 strand_apicid = ntv_strand_apicid(hdl);
1343 #else
1344 strand_apicid = xpv_strand_apicid(hdl);
1345 #endif
1347 if (!x86gentopo_legacy) {
1349 * If fm_smb_chipinst() or fm_smb_bboard() fails,
1350 * topo reverts to legacy mode
1352 rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
1353 if (rc == 0) {
1354 hdl->cmih_smb_chipid = chip_inst;
1355 hdl->cmih_smbiosid = smb_id;
1356 } else {
1357 #ifdef DEBUG
1358 			cmn_err(CE_NOTE, "!cmi failed to read smbios chip info");
1359 #endif /* DEBUG */
1360 return;
1363 hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
1364 #ifdef DEBUG
1365 if (hdl->cmih_smb_bboard == NULL)
1366 		cmn_err(CE_NOTE,
1367 		    "!cmi failed to read smbios base board info");
1368 #endif /* DEBUG */
1372 void
1373 cmi_hdl_hold(cmi_hdl_t ophdl)
1375 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1377 ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1379 atomic_inc_32(hdl->cmih_refcntp);
1382 static int
1383 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1385 volatile uint32_t *refcntp;
1386 uint32_t refcnt;
1388 refcntp = &ent->cmae_refcnt;
1389 refcnt = *refcntp;
1391 if (refcnt == 0) {
1393 * Associated object never existed, is being destroyed,
1394 * or has been destroyed.
1396 return (0);
1400 * We cannot use atomic increment here because once the reference
1401 * count reaches zero it must never be bumped up again.
1403 while (refcnt != 0) {
1404 if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1405 return (1);
1406 refcnt = *refcntp;
1410 * Somebody dropped the reference count to 0 after our initial
1411 * check.
1413 return (0);
1417 void
1418 cmi_hdl_rele(cmi_hdl_t ophdl)
1420 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1422 ASSERT(*hdl->cmih_refcntp > 0);
1423 atomic_dec_32(hdl->cmih_refcntp);
1426 void
1427 cmi_hdl_destroy(cmi_hdl_t ophdl)
1429 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1430 cmi_hdl_ent_t *ent;
1432 /* Release the reference count held by cmi_hdl_create(). */
1433 ASSERT(*hdl->cmih_refcntp > 0);
1434 atomic_dec_32(hdl->cmih_refcntp);
1435 hdl->cmih_flags |= CMIH_F_DEAD;
1437 ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
1438 hdl->cmih_strandid);
1440 	 * Use busy polling instead of a condition variable here because
1441 	 * cmi_hdl_rele() may be called from a #MC handler.
1443 while (cmi_hdl_canref(ent)) {
1444 cmi_hdl_rele(ophdl);
1445 delay(1);
1447 ent->cmae_hdlp = NULL;
1449 kmem_free(hdl, sizeof (*hdl));
1452 void
1453 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1455 IMPLHDL(ophdl)->cmih_spec = arg;
1458 void *
1459 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1461 return (IMPLHDL(ophdl)->cmih_spec);
1464 void
1465 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1467 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1469 ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1470 hdl->cmih_mcops = mcops;
1471 hdl->cmih_mcdata = mcdata;
1474 const struct cmi_mc_ops *
1475 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1477 return (IMPLHDL(ophdl)->cmih_mcops);
1480 void *
1481 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1483 return (IMPLHDL(ophdl)->cmih_mcdata);
1486 cmi_hdl_t
1487 cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1488 uint_t strandid)
1490 cmi_hdl_ent_t *ent;
1492 if (chipid > CMI_MAX_CHIPID ||
1493 coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1494 strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1495 return (NULL);
1497 ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1499 if (class == CMI_HDL_NEUTRAL)
1500 #ifdef __xpv
1501 class = CMI_HDL_SOLARIS_xVM_MCA;
1502 #else
1503 class = CMI_HDL_NATIVE;
1504 #endif
1506 if (!cmi_hdl_canref(ent))
1507 return (NULL);
1509 if (ent->cmae_hdlp->cmih_class != class) {
1510 cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
1511 return (NULL);
1514 return ((cmi_hdl_t)ent->cmae_hdlp);
1517 cmi_hdl_t
1518 cmi_hdl_any(void)
1520 int i, j;
1521 cmi_hdl_ent_t *ent;
1522 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1523 cmi_strand_nbits);
1525 for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1526 if (cmi_chip_tab[i] == NULL)
1527 continue;
1528 for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1529 j++, ent++) {
1530 if (cmi_hdl_canref(ent))
1531 return ((cmi_hdl_t)ent->cmae_hdlp);
1535 return (NULL);
1538 void
1539 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1540 void *arg1, void *arg2, void *arg3)
1542 int i, j;
1543 cmi_hdl_ent_t *ent;
1544 int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1545 cmi_strand_nbits);
1547 for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1548 if (cmi_chip_tab[i] == NULL)
1549 continue;
1550 for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1551 j++, ent++) {
1552 if (cmi_hdl_canref(ent)) {
1553 cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1554 if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1555 == CMI_HDL_WALK_DONE) {
1556 cmi_hdl_rele((cmi_hdl_t)hdl);
1557 return;
1559 cmi_hdl_rele((cmi_hdl_t)hdl);
1565 void
1566 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1568 IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1569 IMPLHDL(ophdl)->cmih_cmi = cmi;
1572 void *
1573 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1575 return (IMPLHDL(ophdl)->cmih_cmi);
1578 void *
1579 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1581 return (IMPLHDL(ophdl)->cmih_cmidata);
1584 enum cmi_hdl_class
1585 cmi_hdl_class(cmi_hdl_t ophdl)
1587 return (IMPLHDL(ophdl)->cmih_class);
1590 #define CMI_HDL_OPFUNC(what, type) \
1591 type \
1592 cmi_hdl_##what(cmi_hdl_t ophdl) \
1594 return (HDLOPS(IMPLHDL(ophdl))-> \
1595 cmio_##what(IMPLHDL(ophdl))); \
1598 CMI_HDL_OPFUNC(vendor, uint_t)
1599 CMI_HDL_OPFUNC(vendorstr, const char *)
1600 CMI_HDL_OPFUNC(family, uint_t)
1601 CMI_HDL_OPFUNC(model, uint_t)
1602 CMI_HDL_OPFUNC(stepping, uint_t)
1603 CMI_HDL_OPFUNC(chipid, uint_t)
1604 CMI_HDL_OPFUNC(procnodeid, uint_t)
1605 CMI_HDL_OPFUNC(coreid, uint_t)
1606 CMI_HDL_OPFUNC(strandid, uint_t)
1607 CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
1608 CMI_HDL_OPFUNC(strand_apicid, uint_t)
1609 CMI_HDL_OPFUNC(chiprev, uint32_t)
1610 CMI_HDL_OPFUNC(chiprevstr, const char *)
1611 CMI_HDL_OPFUNC(getsockettype, uint32_t)
1612 CMI_HDL_OPFUNC(getsocketstr, const char *)
1613 CMI_HDL_OPFUNC(logical_id, id_t)
1614 CMI_HDL_OPFUNC(smbiosid, uint16_t)
1615 CMI_HDL_OPFUNC(smb_chipid, uint_t)
1616 CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1618 boolean_t
1619 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1621 return (IMPLHDL(ophdl)->cmih_mstrand);
1624 void
1625 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1627 if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1628 return;
1630 cmi_hdl_inj_begin(ophdl);
1631 HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1632 cmi_hdl_inj_end(NULL);
1635 int
1636 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1638 return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1639 new_status, old_status));
1642 #ifndef __xpv
1644 * Return hardware chip instance; cpuid_get_chipid provides this directly.
1646 uint_t
1647 cmi_ntv_hwchipid(cpu_t *cp)
1649 return (cpuid_get_chipid(cp));
1653 * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1655 uint_t
1656 cmi_ntv_hwprocnodeid(cpu_t *cp)
1658 return (cpuid_get_procnodeid(cp));
1662 * Return core instance within a single chip.
1664 uint_t
1665 cmi_ntv_hwcoreid(cpu_t *cp)
1667 return (cpuid_get_pkgcoreid(cp));
1671 * Return strand number within a single core. cpuid_get_clogid numbers
1672 * all execution units (strands, or cores in unstranded models) sequentially
1673 * within a single chip.
1675 uint_t
1676 cmi_ntv_hwstrandid(cpu_t *cp)
1678 int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1679 cpuid_get_ncore_per_chip(cp);
1681 return (cpuid_get_clogid(cp) % strands_per_core);
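/*
 * For example, on a chip with 4 cores of 2 strands each (8 execution
 * units per chip) strands_per_core is 2, so cpuid_get_clogid() values
 * 0 through 7 map to strand ids 0, 1, 0, 1, ...; the numbers here are
 * purely illustrative.
 */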
1684 static void
1685 cmi_ntv_hwdisable_mce_xc(void)
1687 ulong_t cr4;
1689 cr4 = getcr4();
1690 cr4 = cr4 & (~CR4_MCE);
1691 setcr4(cr4);
1694 void
1695 cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
1697 cpuset_t set;
1698 cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
1699 cpu_t *cp = HDLPRIV(thdl);
1701 if (CPU->cpu_id == cp->cpu_id) {
1702 cmi_ntv_hwdisable_mce_xc();
1703 } else {
1704 CPUSET_ONLY(set, cp->cpu_id);
1705 xc_call(NULL, NULL, NULL, CPUSET2BV(set),
1706 (xc_func_t)cmi_ntv_hwdisable_mce_xc);
1710 #endif /* __xpv */
1712 void
1713 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1715 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1717 hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1720 void
1721 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1723 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1725 hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1728 cmi_errno_t
1729 cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
1731 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1734 	 * Regardless of the handle class, we first check for an
1735 * interposed value. In the xVM case you probably want to
1736 * place interposed values within the hypervisor itself, but
1737 * we still allow interposing them in dom0 for test and bringup
1738 * purposes.
1740 if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
1741 msri_lookup(hdl, msr, valp))
1742 return (CMI_SUCCESS);
1744 if (HDLOPS(hdl)->cmio_rdmsr == NULL)
1745 return (CMIERR_NOTSUP);
1747 return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
1750 cmi_errno_t
1751 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1753 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1755 /* Invalidate any interposed value */
1756 msri_rment(hdl, msr);
1758 if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1759 return (CMI_SUCCESS); /* pretend all is ok */
1761 return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1764 void
1765 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1767 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1768 ulong_t cr4;
1770 if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1771 HDLOPS(hdl)->cmio_setcr4 == NULL)
1772 return;
1774 cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1776 HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1779 void
1780 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1782 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1783 int i;
1785 if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1786 return;
1788 cmi_hdl_inj_begin(ophdl);
1790 for (i = 0; i < nregs; i++, regs++)
1791 HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1792 regs->cmr_msrval);
1794 cmi_hdl_inj_end(ophdl);
1797 /*ARGSUSED*/
1798 void
1799 cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1801 #ifdef __xpv
1802 cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1803 int i;
1805 for (i = 0; i < nregs; i++, regs++)
1806 msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
1807 #endif
1811 void
1812 cmi_pcird_nohw(void)
1814 cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
1817 void
1818 cmi_pciwr_nohw(void)
1820 cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
1823 static uint32_t
1824 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1825 int *interpose, ddi_acc_handle_t hdl)
1827 uint32_t val;
1829 if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1830 pcii_lookup(bus, dev, func, reg, asz, &val)) {
1831 if (interpose)
1832 *interpose = 1;
1833 return (val);
1835 if (interpose)
1836 *interpose = 0;
1838 if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1839 return (0);
1841 switch (asz) {
1842 case 1:
1843 if (hdl)
1844 val = pci_config_get8(hdl, (off_t)reg);
1845 else
1846 val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
1847 reg);
1848 break;
1849 case 2:
1850 if (hdl)
1851 val = pci_config_get16(hdl, (off_t)reg);
1852 else
1853 val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
1854 reg);
1855 break;
1856 case 4:
1857 if (hdl)
1858 val = pci_config_get32(hdl, (off_t)reg);
1859 else
1860 val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
1861 reg);
1862 break;
1863 default:
1864 val = 0;
1866 return (val);
1869 uint8_t
1870 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1871 ddi_acc_handle_t hdl)
1873 return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1874 hdl));
1877 uint16_t
1878 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1879 ddi_acc_handle_t hdl)
1881 return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1882 hdl));
1885 uint32_t
1886 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1887 ddi_acc_handle_t hdl)
1889 return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1892 void
1893 cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
1895 pcii_addent(bus, dev, func, reg, val, 1);
1898 void
1899 cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
1901 pcii_addent(bus, dev, func, reg, val, 2);
1904 void
1905 cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
1907 pcii_addent(bus, dev, func, reg, val, 4);
1910 static void
1911 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1912 ddi_acc_handle_t hdl, uint32_t val)
1915 	 * If there is an interposed value for this register, invalidate it.
1917 pcii_rment(bus, dev, func, reg, asz);
1919 if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1920 return;
1922 switch (asz) {
1923 case 1:
1924 if (hdl)
1925 pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1926 else
1927 pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
1928 (uint8_t)val);
1929 break;
1931 case 2:
1932 if (hdl)
1933 pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
1934 else
1935 pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
1936 (uint16_t)val);
1937 break;
1939 case 4:
1940 if (hdl)
1941 pci_config_put32(hdl, (off_t)reg, val);
1942 else
1943 pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
1944 val);
1945 break;
1947 default:
1948 break;
1952 void
1953 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1954 uint8_t val)
1956 cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
1959 void
1960 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1961 uint16_t val)
1963 cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
1966 void
1967 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1968 uint32_t val)
1970 cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
1973 static const struct cmi_hdl_ops cmi_hdl_ops = {
1974 #ifdef __xpv
1976 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
1978 xpv_vendor, /* cmio_vendor */
1979 xpv_vendorstr, /* cmio_vendorstr */
1980 xpv_family, /* cmio_family */
1981 xpv_model, /* cmio_model */
1982 xpv_stepping, /* cmio_stepping */
1983 xpv_chipid, /* cmio_chipid */
1984 xpv_procnodeid, /* cmio_procnodeid */
1985 xpv_coreid, /* cmio_coreid */
1986 xpv_strandid, /* cmio_strandid */
1987 xpv_procnodes_per_pkg, /* cmio_procnodes_per_pkg */
1988 xpv_strand_apicid, /* cmio_strand_apicid */
1989 xpv_chiprev, /* cmio_chiprev */
1990 xpv_chiprevstr, /* cmio_chiprevstr */
1991 xpv_getsockettype, /* cmio_getsockettype */
1992 xpv_getsocketstr, /* cmio_getsocketstr */
1993 xpv_logical_id, /* cmio_logical_id */
1994 NULL, /* cmio_getcr4 */
1995 NULL, /* cmio_setcr4 */
1996 xpv_rdmsr, /* cmio_rdmsr */
1997 xpv_wrmsr, /* cmio_wrmsr */
1998 xpv_msrinterpose, /* cmio_msrinterpose */
1999 xpv_int, /* cmio_int */
2000 xpv_online, /* cmio_online */
2001 xpv_smbiosid, /* cmio_smbiosid */
2002 xpv_smb_chipid, /* cmio_smb_chipid */
2003 xpv_smb_bboard /* cmio_smb_bboard */
2005 #else /* __xpv */
2008 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
2010 ntv_vendor, /* cmio_vendor */
2011 ntv_vendorstr, /* cmio_vendorstr */
2012 ntv_family, /* cmio_family */
2013 ntv_model, /* cmio_model */
2014 ntv_stepping, /* cmio_stepping */
2015 ntv_chipid, /* cmio_chipid */
2016 ntv_procnodeid, /* cmio_procnodeid */
2017 ntv_coreid, /* cmio_coreid */
2018 ntv_strandid, /* cmio_strandid */
2019 ntv_procnodes_per_pkg, /* cmio_procnodes_per_pkg */
2020 ntv_strand_apicid, /* cmio_strand_apicid */
2021 ntv_chiprev, /* cmio_chiprev */
2022 ntv_chiprevstr, /* cmio_chiprevstr */
2023 ntv_getsockettype, /* cmio_getsockettype */
2024 ntv_getsocketstr, /* cmio_getsocketstr */
2025 ntv_logical_id, /* cmio_logical_id */
2026 ntv_getcr4, /* cmio_getcr4 */
2027 ntv_setcr4, /* cmio_setcr4 */
2028 ntv_rdmsr, /* cmio_rdmsr */
2029 ntv_wrmsr, /* cmio_wrmsr */
2030 ntv_msrinterpose, /* cmio_msrinterpose */
2031 ntv_int, /* cmio_int */
2032 ntv_online, /* cmio_online */
2033 ntv_smbiosid, /* cmio_smbiosid */
2034 ntv_smb_chipid, /* cmio_smb_chipid */
2035 ntv_smb_bboard /* cmio_smb_bboard */
2036 #endif