Merge illumos-gate
[unleashed/lotheac.git] / usr / src / uts / i86pc / cpu / generic_cpu / gcpu_main.c
blob438abc15b4d094565ac0e474b9888c6eeadb6650
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
32 * Copyright (c) 2018, Joyent, Inc.
36 * Generic x86 CPU Module
38 * This CPU module is used for generic x86 CPUs when Solaris has no other
39 * CPU-specific support module available. Code in this module should be the
40 * absolute bare-bones support and must be cognizant of both Intel and AMD etc.
43 #include <sys/types.h>
44 #include <sys/cpu_module_impl.h>
45 #include <sys/cpuvar.h>
46 #include <sys/kmem.h>
47 #include <sys/modctl.h>
48 #include <sys/pghw.h>
49 #include <sys/x86_archext.h>
51 #include "gcpu.h"
/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32

static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];

#ifdef	DEBUG
/* Debug-only knobs to disable or override synthetic CPU identity strings. */
int gcpu_id_disable = 0;
static const char *gcpu_id_override[GCPU_MAX_CHIPID] = { NULL };
#endif
66 * This should probably be delegated to a CPU specific module. However, as those
67 * haven't been developed as actively for recent CPUs, we should revisit this
68 * when we do have it and move this out of gcpu.
70 * This method is only supported on Intel Xeon platforms. It relies on a
71 * combination of the PPIN and the cpuid signature. Both are required to form
72 * the synthetic ID. This ID is preceded with iv0-INTC to represent that this is
73 * an Intel synthetic ID. The iv0 is the illumos version zero of the ID for
74 * Intel. If we have a new scheme for a new generation of processors, then that
75 * should rev the version field, otherwise for a given processor, this synthetic
76 * ID should not change. For more information on PPIN and these MSRS, see the
77 * relevant processor external design specification.
79 static char *
80 gcpu_init_ident_intc(cmi_hdl_t hdl)
82 uint64_t msr;
85 * This list should be extended as new Intel Xeon family processors come
86 * out.
88 switch (cmi_hdl_model(hdl)) {
89 case INTC_MODEL_IVYBRIDGE_XEON:
90 case INTC_MODEL_HASWELL_XEON:
91 case INTC_MODEL_BROADWELL_XEON:
92 case INTC_MODEL_BROADWELL_XEON_D:
93 case INTC_MODEL_SKYLAKE_XEON:
94 break;
95 default:
96 return (NULL);
99 if (cmi_hdl_rdmsr(hdl, MSR_PLATFORM_INFO, &msr) != CMI_SUCCESS) {
100 return (NULL);
103 if ((msr & MSR_PLATFORM_INFO_PPIN) == 0) {
104 return (NULL);
107 if (cmi_hdl_rdmsr(hdl, MSR_PPIN_CTL, &msr) != CMI_SUCCESS) {
108 return (NULL);
111 if ((msr & MSR_PPIN_CTL_ENABLED) == 0) {
112 if ((msr & MSR_PPIN_CTL_LOCKED) != 0) {
113 return (NULL);
116 if (cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_ENABLED) !=
117 CMI_SUCCESS) {
118 return (NULL);
122 if (cmi_hdl_rdmsr(hdl, MSR_PPIN, &msr) != CMI_SUCCESS) {
123 return (NULL);
127 * Now that we've read data, lock the PPIN. Don't worry about success or
128 * failure of this part, as we will have gotten everything that we need.
129 * It is possible that it locked open, for example.
131 (void) cmi_hdl_wrmsr(hdl, MSR_PPIN_CTL, MSR_PPIN_CTL_LOCKED);
133 return (kmem_asprintf("iv0-INTC-%x-%llx", cmi_hdl_chipsig(hdl), msr));
136 static void
137 gcpu_init_ident(cmi_hdl_t hdl, struct gcpu_chipshared *sp)
139 #ifdef DEBUG
140 uint_t chipid;
143 * On debug, allow a developer to override the string to more
144 * easily test CPU autoreplace without needing to physically
145 * replace a CPU.
147 if (gcpu_id_disable != 0) {
148 return;
151 chipid = cmi_hdl_chipid(hdl);
152 if (gcpu_id_override[chipid] != NULL) {
153 sp->gcpus_ident = strdup(gcpu_id_override[chipid]);
154 return;
156 #endif
158 switch (cmi_hdl_vendor(hdl)) {
159 case X86_VENDOR_Intel:
160 sp->gcpus_ident = gcpu_init_ident_intc(hdl);
161 default:
162 break;
167 * Our cmi_init entry point, called during startup of each cpu instance.
170 gcpu_init(cmi_hdl_t hdl, void **datap)
172 uint_t chipid = cmi_hdl_chipid(hdl);
173 struct gcpu_chipshared *sp, *osp;
174 gcpu_data_t *gcpu;
176 if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
177 return (ENOTSUP);
180 * Allocate the state structure for this cpu. We will only
181 * allocate the bank logout areas in gcpu_mca_init once we
182 * know how many banks there are.
184 gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
185 cmi_hdl_hold(hdl); /* release in gcpu_fini */
186 gcpu->gcpu_hdl = hdl;
189 * Allocate a chipshared structure if no sibling cpu has already
190 * allocated it, but allow for the fact that a sibling core may
191 * be starting up in parallel.
193 if ((sp = gcpu_shared[chipid]) == NULL) {
194 sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
195 mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
196 mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
197 osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
198 if (osp != NULL) {
199 mutex_destroy(&sp->gcpus_cfglock);
200 mutex_destroy(&sp->gcpus_poll_lock);
201 kmem_free(sp, sizeof (struct gcpu_chipshared));
202 sp = osp;
203 } else {
204 gcpu_init_ident(hdl, sp);
208 atomic_inc_32(&sp->gcpus_actv_cnt);
209 gcpu->gcpu_shared = sp;
211 return (0);
215 * deconfigure gcpu_init()
217 void
218 gcpu_fini(cmi_hdl_t hdl)
220 uint_t chipid = cmi_hdl_chipid(hdl);
221 gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
222 struct gcpu_chipshared *sp;
224 if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
225 return;
227 gcpu_mca_fini(hdl);
230 * Keep shared data in cache for reuse.
232 sp = gcpu_shared[chipid];
233 ASSERT(sp != NULL);
234 atomic_dec_32(&sp->gcpus_actv_cnt);
236 if (gcpu != NULL)
237 kmem_free(gcpu, sizeof (gcpu_data_t));
239 /* Release reference count held in gcpu_init(). */
240 cmi_hdl_rele(hdl);
243 void
244 gcpu_post_startup(cmi_hdl_t hdl)
246 gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
248 if (gcpu_disable)
249 return;
251 if (gcpu != NULL)
252 cms_post_startup(hdl);
255 void
256 gcpu_post_mpstartup(cmi_hdl_t hdl)
258 if (gcpu_disable)
259 return;
261 cms_post_mpstartup(hdl);
264 * All cpu handles are initialized only once all cpus are started, so we
265 * can begin polling post mp startup.
267 gcpu_mca_poll_start(hdl);
270 const char *
271 gcpu_ident(cmi_hdl_t hdl)
273 uint_t chipid;
274 struct gcpu_chipshared *sp;
276 if (gcpu_disable)
277 return (NULL);
279 chipid = cmi_hdl_chipid(hdl);
280 if (chipid >= GCPU_MAX_CHIPID)
281 return (NULL);
283 if (cmi_hdl_getcmidata(hdl) == NULL)
284 return (NULL);
286 sp = gcpu_shared[cmi_hdl_chipid(hdl)];
287 return (sp->gcpus_ident);
290 #define GCPU_OP(ntvop, xpvop) ntvop
292 cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;
294 const cmi_ops_t _cmi_ops = {
295 gcpu_init, /* cmi_init */
296 gcpu_post_startup, /* cmi_post_startup */
297 gcpu_post_mpstartup, /* cmi_post_mpstartup */
298 gcpu_faulted_enter, /* cmi_faulted_enter */
299 gcpu_faulted_exit, /* cmi_faulted_exit */
300 gcpu_mca_init, /* cmi_mca_init */
301 GCPU_OP(gcpu_mca_trap, NULL), /* cmi_mca_trap */
302 GCPU_OP(gcpu_cmci_trap, NULL), /* cmi_cmci_trap */
303 gcpu_msrinject, /* cmi_msrinject */
304 GCPU_OP(gcpu_hdl_poke, NULL), /* cmi_hdl_poke */
305 gcpu_fini, /* cmi_fini */
306 GCPU_OP(NULL, gcpu_xpv_panic_callback), /* cmi_panic_callback */
307 gcpu_ident /* cmi_ident */
310 static struct modlcpu modlcpu = {
311 &mod_cpuops,
312 "Generic x86 CPU Module"
315 static struct modlinkage modlinkage = {
316 MODREV_1,
317 (void *)&modlcpu,
318 NULL
322 _init(void)
324 return (mod_install(&modlinkage));
328 _info(struct modinfo *modinfop)
330 return (mod_info(&modlinkage, modinfop));
334 _fini(void)
336 return (mod_remove(&modlinkage));