arch/x86/kernel/os/cmi_hw.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * CPU Module Interface - hardware abstraction.
 */

#include <sys/types.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/ksynch.h>
#include <sys/x_call.h>
#include <sys/pghw.h>
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgspace.h>
#include <sys/archsystm.h>
#include <sys/ontrap.h>
#include <sys/controlregs.h>
#include <sys/sunddi.h>
#include <sys/trap.h>
#include <sys/mca_x86.h>
#include <sys/processor.h>
#include <sys/cmn_err.h>
#include <sys/nvpair.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/cpu_module_impl.h>

/*
 * Variable which determines if the SMBIOS supports x86 generic topology; or
 * if legacy topology enumeration will occur.
 */
extern int x86gentopo_legacy;

/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync when making changes.
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;

#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
#define	HDLOPS(hdl)	((hdl)->cmih_ops)

#define	CMIH_F_INJACTV		0x1ULL
#define	CMIH_F_DEAD		0x2ULL

/*
 * Ops structure for handle operations.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};

static const struct cmi_hdl_ops cmi_hdl_ops;

/*
 * Handles are looked up from contexts such as polling, injection etc
 * where the context is reasonably well defined (although a poller could
 * interrupt any old thread holding any old lock).  They are also looked
 * up by machine check handlers, which may strike at inconvenient times
 * such as during handle initialization or destruction or during handle
 * lookup (which the #MC handler itself will also have to perform).
 *
 * So keeping handles in a linked list makes locking difficult when we
 * consider #MC handlers.  Our solution is to have a look-up table indexed
 * by that which uniquely identifies a handle - chip/core/strand id -
 * with each entry a structure including a pointer to a handle
 * structure for the resource, and a reference count for the handle.
 * Reference counts are modified atomically.  The public cmi_hdl_hold
 * always succeeds because this can only be used after handle creation
 * and before the call to destruct, so the hold count is already at least one.
 * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already decremented to zero
 * before applying our hold.
 *
 * The table is an array of CMI_CHIPID_ARR_SZ entries (the maximum number
 * of chips), indexed by the chip id.  If the chip is not present, the
 * entry is NULL.  Each entry is a pointer to another array which contains
 * a list of all strands of the chip.  This first-level table is allocated
 * the first time we want to populate an entry.  The size of the latter
 * (per chip) table is CMI_MAX_STRANDS_PER_CHIP and it is populated when
 * one of its cpus starts.
 *
 * Ideally we should only allocate to the actual number of chips, cores per
 * chip and strands per core.  The number of chips is not available until
 * all of them have been seen; the number of cores and strands is only
 * partially available.  For now we stick with the above approach.
 */

#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

#define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP(cbits)	(1 << (cbits))
#define	CMI_MAX_COREID(cbits)		((1 << (cbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CORE(sbits)	(1 << (sbits))
#define	CMI_MAX_STRANDID(sbits)		((1 << (sbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CHIP(cbits, sbits)	\
	(CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))

#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)

typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
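
/*
 * Worked example (editor's illustration, not part of the build): with the
 * default CMI_MAX_CORES_PER_CHIP_NBITS = 4 and
 * CMI_MAX_STRANDS_PER_CORE_NBITS = 3, each per-chip table holds
 * CMI_MAX_STRANDS_PER_CHIP(4, 3) = 16 * 8 = 128 cmi_hdl_ent_t entries,
 * and the entry for <coreid 5, strandid 2> lives at index
 * ((5 & 0xf) << 3) | (2 & 0x7) == 42, matching the arithmetic in
 * cmi_hdl_ent_lookup() below.
 */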

/*
 * Default values for the number of core and strand bits.
 */
uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
static int cmi_ext_topo_check = 0;

/*
 * Controls where we will source PCI config space data.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;

/*
 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc.
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008

int cmi_call_func_ntv_tries = 3;

static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_priority - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
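
/*
 * Editor's sketch (hypothetical, not part of the build): a cross-call
 * callback compatible with call_func_ntv() receives its result pointer
 * as the third xc_arg_t and must store a cmi_errno_t through it, since
 * call_func_ntv() treats a lingering result of -1 as "callback never ran".
 */
#if 0
static int
example_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	/* ... do per-cpu work with arg1/arg2 ... */
	*rcp = CMI_SUCCESS;		/* must not leave -1 behind */
	return (0);
}
#endif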

static uint64_t injcnt;

void
cmi_hdl_inj_begin(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	if (hdl != NULL)
		hdl->cmih_flags |= CMIH_F_INJACTV;
	if (injcnt++ == 0) {
		cmn_err(CE_NOTE, "Hardware error injection/simulation "
		    "activity noted");
	}
}

void
cmi_hdl_inj_end(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
	if (hdl != NULL)
		hdl->cmih_flags &= ~CMIH_F_INJACTV;
}

boolean_t
cmi_inj_tainted(void)
{
	return (injcnt != 0 ? B_TRUE : B_FALSE);
}

/*
 * =======================================================
 * |		MSR Interposition			|
 * |		-----------------			|
 * |							|
 * -------------------------------------------------------
 */

#define	CMI_MSRI_HASHSZ		16
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)

struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];

static void
msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	mutex_enter(&hbp->msrib_lock);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr))
			break;
	}

	if (hep != NULL) {
		hep->msrie_msrval = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->msrie_hdl = hdl;
		hep->msrie_msrnum = msr;
		hep->msrie_msrval = val;

		if (hbp->msrib_head != NULL)
			hbp->msrib_head->msrie_prev = hep;
		hep->msrie_next = hbp->msrib_head;
		hep->msrie_prev = NULL;
		hbp->msrib_head = hep;
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 * Look for a match for the given handle and msr.  Return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	/*
	 * This function is called during #MC trap handling, so we should
	 * consider the possibility that the hash mutex is held by the
	 * interrupted thread.  This should not happen because interposition
	 * is an artificial injection mechanism and the #MC is requested
	 * after adding entries, but just in case of a real #MC at an
	 * unlucky moment we'll use mutex_tryenter here.
	 */
	if (!mutex_tryenter(&hbp->msrib_lock))
		return (0);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			*valp = hep->msrie_msrval;
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);

	return (hep != NULL);
}
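
/*
 * Editor's sketch (hypothetical, not part of the build): the interposition
 * round trip pairs msri_addent() with msri_lookup() on the same handle and
 * MSR number; the MSR number below is illustrative only.
 */
#if 0
static void
example_msri_roundtrip(cmi_hdl_impl_t *hdl)
{
	uint64_t val;

	msri_addent(hdl, 0x401, 0xdeadbeefULL);	/* stash a fake MSR value */
	if (msri_lookup(hdl, 0x401, &val))
		ASSERT(val == 0xdeadbeefULL);	/* same handle+msr hits */
}
#endif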

/*
 * Remove any interposed value that matches.
 */
static void
msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	if (!mutex_tryenter(&hbp->msrib_lock))
		return;

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			if (hep->msrie_prev != NULL)
				hep->msrie_prev->msrie_next = hep->msrie_next;

			if (hep->msrie_next != NULL)
				hep->msrie_next->msrie_prev = hep->msrie_prev;

			if (hbp->msrib_head == hep)
				hbp->msrib_head = hep->msrie_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 * =======================================================
 * |		PCI Config Space Interposition		|
 * |		------------------------------		|
 * |							|
 * -------------------------------------------------------
 */

/*
 * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
 * and then record whether the value stashed was made with a byte, word or
 * doubleword access;  we will only return a hit for an access of the
 * same size.  If you access say a 32-bit register using byte accesses
 * and then attempt to read the full 32-bit value back you will not obtain
 * any sort of merged result - you get a lookup miss.
 */

#define	CMI_PCII_HASHSZ		16
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)

struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];

/*
 * Add a new entry to the PCI interpose hash, overwriting any existing
 * entry that is found.
 */
static void
pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	cmi_hdl_inj_begin(NULL);

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
			break;
	}

	if (hep != NULL) {
		hep->pcii_val = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->pcii_bus = bus;
		hep->pcii_dev = dev;
		hep->pcii_func = func;
		hep->pcii_reg = reg;
		hep->pcii_asize = asz;
		hep->pcii_val = val;

		if (hbp->pciib_head != NULL)
			hbp->pciib_head->pcii_prev = hep;
		hep->pcii_next = hbp->pciib_head;
		hep->pcii_prev = NULL;
		hbp->pciib_head = hep;
	}

	mutex_exit(&hbp->pciib_lock);

	cmi_hdl_inj_end(NULL);
}

/*
 * Look for a match for the given bus/dev/func/reg; return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	if (!mutex_tryenter(&hbp->pciib_lock))
		return (0);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			*valp = hep->pcii_val;
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);

	return (hep != NULL);
}
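
/*
 * Editor's sketch (hypothetical, not part of the build): lookups match on
 * access size as well as address, so a value stashed as a doubleword is
 * invisible to a byte-sized read of the same register.
 */
#if 0
static void
example_pcii_size_match(void)
{
	uint32_t val;

	pcii_addent(0, 24, 3, 0x10, 0x12345678, 4);	/* dword interpose */
	ASSERT(pcii_lookup(0, 24, 3, 0x10, 4, &val));	/* same size: hit */
	ASSERT(!pcii_lookup(0, 24, 3, 0x10, 1, &val));	/* byte read: miss */
}
#endif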

static void
pcii_rment(int bus, int dev, int func, int reg, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			if (hep->pcii_prev != NULL)
				hep->pcii_prev->pcii_next = hep->pcii_next;

			if (hep->pcii_next != NULL)
				hep->pcii_next->pcii_prev = hep->pcii_prev;

			if (hbp->pciib_head == hep)
				hbp->pciib_head = hep->pcii_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);
}

/*
 * =======================================================
 * |		Native methods				|
 * |		--------------				|
 * |							|
 * | These are used when we are running native on bare-	|
 * | metal, or simply don't know any better.		|
 * -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)

static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}

/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val,
	    (uintptr_t)NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val,
	    (uintptr_t)NULL);
}

volatile uint32_t cmi_trapped_rdmsr;

/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}

volatile uint32_t cmi_trapped_wrmsr;

/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}

static cmi_errno_t
ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	msri_addent(hdl, msr, val);
	return (CMI_SUCCESS);
}

/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no,
	    (uintptr_t)NULL);
}

static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	int rc;
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	while (mutex_tryenter(&cpu_lock) == 0) {
		if (hdl->cmih_flags & CMIH_F_DEAD)
			return (EBUSY);
		delay(1);
	}
	rc = p_online_internal_locked(cpuid, new_status, old_status);
	mutex_exit(&cpu_lock);

	return (rc);
}

/*ARGSUSED*/
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cpu_t *cp, *startcp;

	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
}

static boolean_t
cpu_is_cmt(void *priv)
{
	cpu_t *cp = (cpu_t *)priv;

	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (strands_per_core > 1);
}

/*
 * Find the handle entry of a given cpu identified by a <chip,core,strand>
 * tuple.
 */
static cmi_hdl_ent_t *
cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
{
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	/*
	 * Allocate the per-chip table, which contains a list of handles
	 * for all strands of the chip.
	 */
	if (cmi_chip_tab[chipid] == NULL) {
		size_t sz;
		cmi_hdl_ent_t *pg;

		sz = max_strands * sizeof (cmi_hdl_ent_t);
		pg = kmem_zalloc(sz, KM_SLEEP);

		/* test and set the per-chip table if it is not allocated */
		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
			kmem_free(pg, sz); /* someone beat us to it */
	}

	return (cmi_chip_tab[chipid] +
	    ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
	    ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
}

extern void cpuid_get_ext_topo(uint_t, uint_t *, uint_t *);

cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;
	uint_t vendor;

	ASSERT(class == CMI_HDL_NATIVE);

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	/*
	 * Assume all chips in the system are the same type.
	 * For Intel, attempt to check if extended topology is available
	 * via CPUID.EAX=0xB.  If so, get the number of core and strand bits.
	 */
	vendor = cpuid_getvendor((cpu_t *)priv);
	if (vendor == X86_VENDOR_Intel && cmi_ext_topo_check == 0) {
		cpuid_get_ext_topo(vendor, &cmi_core_nbits, &cmi_strand_nbits);
		cmi_ext_topo_check = 1;
	}

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * caller's logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}

void
cmi_read_smbios(cmi_hdl_t ophdl)
{
	uint_t strand_apicid = UINT_MAX;
	uint_t chip_inst = UINT_MAX;
	uint16_t smb_id = USHRT_MAX;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

	strand_apicid = ntv_strand_apicid(hdl);

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode.
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi reads smbios base boards info failed");
#endif /* DEBUG */
	}
}

void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}

static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}

void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
}

void
cmi_hdl_destroy(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	/* Release the reference count held by cmi_hdl_create(). */
	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
	hdl->cmih_flags |= CMIH_F_DEAD;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	/*
	 * Use busy polling instead of a condition variable here because
	 * cmi_hdl_rele() may be called from a #MC handler.
	 */
	while (cmi_hdl_canref(ent)) {
		cmi_hdl_rele(ophdl);
		delay(1);
	}
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
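
/*
 * Editor's sketch (hypothetical, not part of the build): the typical handle
 * lifecycle.  cmi_hdl_create() leaves the reference count at 1; lookups take
 * an extra hold that the caller must drop with cmi_hdl_rele(); and
 * cmi_hdl_destroy() drops the initial hold and spins until all other holds
 * are gone.
 */
#if 0
static void
example_hdl_lifecycle(uint_t chipid, uint_t coreid, uint_t strandid)
{
	cmi_hdl_t hdl;

	hdl = cmi_hdl_create(CMI_HDL_NATIVE, chipid, coreid, strandid);
	if (hdl == NULL)
		return;

	/* ... use the handle ... */

	cmi_hdl_destroy(hdl);		/* waits out any other holders */
}
#endif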

void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}

void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}

void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}

const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}

void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}

cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
		class = CMI_HDL_NATIVE;

	if (!cmi_hdl_canref(ent))
		return (NULL);

	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}

cmi_hdl_t
cmi_hdl_any(void)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent))
				return ((cmi_hdl_t)ent->cmae_hdlp);
		}
	}

	return (NULL);
}

void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}
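
/*
 * Editor's sketch (hypothetical, not part of the build): a walk callback
 * returns CMI_HDL_WALK_NEXT to keep going or CMI_HDL_WALK_DONE to stop
 * early.  The walker holds each handle around the call, so the callback
 * need not take its own hold.
 */
#if 0
static int
example_count_cb(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
{
	uint_t *countp = (uint_t *)arg1;

	(*countp)++;
	return (CMI_HDL_WALK_NEXT);
}

/* caller: uint_t n = 0; cmi_hdl_walk(example_count_cb, &n, NULL, NULL); */
#endif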

void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}

void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}

void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}

enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}

#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

/* BEGIN CSTYLED */
CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
/* END CSTYLED */
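
/*
 * Editor's illustration (not part of the build): CMI_HDL_OPFUNC(vendor,
 * uint_t) above expands to
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->cmio_vendor(IMPLHDL(ophdl)));
 *	}
 *
 * i.e. each public accessor is a thin trampoline through the handle's
 * operations vector.
 */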

boolean_t
cmi_hdl_is_cmt(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mstrand);
}

void
cmi_hdl_int(cmi_hdl_t ophdl, int num)
{
	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);
	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
	cmi_hdl_inj_end(NULL);
}

int
cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
{
	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
	    new_status, old_status));
}

/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return hardware node instance; cpuid_get_procnodeid provides this directly.
 */
uint_t
cmi_ntv_hwprocnodeid(cpu_t *cp)
{
	return (cpuid_get_procnodeid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}
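
/*
 * Editor's illustration (not part of the build): on a chip with 2 cores
 * and 2 strands per core (4 logical cpus), strands_per_core is 4 / 2 = 2,
 * so clogid 3 maps to strand 3 % 2 == 1 of its core.
 */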

static void
cmi_ntv_hwdisable_mce_xc(void)
{
	ulong_t cr4;

	cr4 = getcr4();
	cr4 = cr4 & (~CR4_MCE);
	setcr4(cr4);
}

void
cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
{
	cpuset_t set;
	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
	cpu_t *cp = HDLPRIV(thdl);

	if (CPU->cpu_id == cp->cpu_id) {
		cmi_ntv_hwdisable_mce_xc();
	} else {
		CPUSET_ONLY(set, cp->cpu_id);
		xc_call((uintptr_t)NULL, (uintptr_t)NULL, (uintptr_t)NULL,
		    CPUSET2BV(set), (xc_func_t)cmi_ntv_hwdisable_mce_xc);
	}
}

void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}

void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}

cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}
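
/*
 * Editor's sketch (hypothetical, not part of the build): error-injection
 * consumers stash a value with cmi_hdl_msrinterpose() and then read it
 * back through the normal cmi_hdl_rdmsr() path, which prefers interposed
 * values over the hardware.  The MSR number is illustrative only.
 */
#if 0
static void
example_injected_read(cmi_hdl_t hdl)
{
	cmi_mca_regs_t reg;
	uint64_t val;

	reg.cmr_msrnum = 0x401;			/* hypothetical MSR */
	reg.cmr_msrval = 0xbadULL;

	cmi_hdl_msrinterpose(hdl, &reg, 1);
	if (cmi_hdl_rdmsr(hdl, 0x401, &val) == CMI_SUCCESS)
		ASSERT(val == 0xbadULL);	/* interposed value wins */
}
#endif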

cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}

void
cmi_hdl_enable_mce(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	ulong_t cr4;

	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
	    HDLOPS(hdl)->cmio_setcr4 == NULL)
		return;

	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);

	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
}

void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}

/*ARGSUSED*/
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
}

void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}

void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}

static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	default:
		val = 0;
	}

	return (val);
}

uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}

uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}

uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}

void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}

void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}

void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
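
/*
 * Editor's sketch (hypothetical, not part of the build): an interposed PCI
 * config value is returned by the matching-size getter with *interpose set
 * to 1, and a subsequent put of the same register at the same size
 * invalidates it via pcii_rment().
 */
#if 0
static void
example_pci_interpose(void)
{
	int ip;

	cmi_pci_interposel(0, 24, 3, 0x10, 0xcafef00d);
	ASSERT(cmi_pci_getl(0, 24, 3, 0x10, &ip, NULL) == 0xcafef00d && ip);
	cmi_pci_putl(0, 24, 3, 0x10, NULL, 0);	/* drops the interpose */
}
#endif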

static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
			    val);
		break;

	default:
		break;
	}
}

void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}

void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}

void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}

static const struct cmi_hdl_ops cmi_hdl_ops = {
	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
};