4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
24 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
31 * Portions Copyright 2009 Advanced Micro Devices, Inc.
34 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
37 * Various routines to handle identification
38 * and classification of x86 processors.
41 #include <sys/types.h>
42 #include <sys/archsystm.h>
43 #include <sys/x86_archext.h>
45 #include <sys/systm.h>
46 #include <sys/cmn_err.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/cpuvar.h>
50 #include <sys/processor.h>
51 #include <sys/sysmacros.h>
54 #include <sys/controlregs.h>
55 #include <sys/bitmap.h>
56 #include <sys/auxv_386.h>
57 #include <sys/memnode.h>
58 #include <sys/pci_cfgspace.h>
61 #include <sys/hypervisor.h>
63 #include <sys/ontrap.h>
67 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
68 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
69 * them accordingly. For most modern processors, feature detection occurs here
72 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
73 * for the boot CPU and does the basic analysis that the early kernel needs.
74 * x86_featureset is set based on the return value of cpuid_pass1() of the boot
79 * o Determining vendor/model/family/stepping and setting x86_type and
80 * x86_vendor accordingly.
81 * o Processing the feature flags returned by the cpuid instruction while
82 * applying any workarounds or tricks for the specific processor.
83 * o Mapping the feature flags into Solaris feature bits (X86_*).
84 * o Processing extended feature flags if supported by the processor,
85 * again while applying specific processor knowledge.
86 * o Determining the CMT characteristics of the system.
88 * Pass 1 is done on non-boot CPUs during their initialization and the results
89 * are used only as a meager attempt at ensuring that all processors within the
90 * system support the same features.
92 * Pass 2 of cpuid feature analysis happens just at the beginning
93 * of startup(). It just copies in and corrects the remainder
94 * of the cpuid data we depend on: standard cpuid functions that we didn't
95 * need for pass1 feature analysis, and extended cpuid functions beyond the
96 * simple feature processing done in pass1.
98 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
99 * particular kernel memory allocation has been made available. It creates a
100 * readable brand string based on the data collected in the first two passes.
102 * Pass 4 of cpuid analysis is invoked after post_startup() when all
103 * the support infrastructure for various hardware features has been
104 * initialized. It determines which processor features will be reported
105 * to userland via the aux vector.
107 * All passes are executed on all CPUs, but only the boot CPU determines what
108 * features the kernel will use.
110 * Much of the worst junk in this file is for the support of processors
111 * that didn't really implement the cpuid instruction properly.
113 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
114 * the pass numbers. Accordingly, changes to the pass code may require changes
115 * to the accessor code.
118 uint_t x86_vendor
= X86_VENDOR_IntelClone
;
119 uint_t x86_type
= X86_TYPE_OTHER
;
120 uint_t x86_clflush_size
= 0;
122 uint_t pentiumpro_bug4046376
;
124 uchar_t x86_featureset
[BT_SIZEOFMAP(NUM_X86_FEATURES
)];
126 static char *x86_feature_names
[NUM_X86_FEATURES
] = {
170 is_x86_feature(void *featureset
, uint_t feature
)
172 ASSERT(feature
< NUM_X86_FEATURES
);
173 return (BT_TEST((ulong_t
*)featureset
, feature
));
177 add_x86_feature(void *featureset
, uint_t feature
)
179 ASSERT(feature
< NUM_X86_FEATURES
);
180 BT_SET((ulong_t
*)featureset
, feature
);
184 remove_x86_feature(void *featureset
, uint_t feature
)
186 ASSERT(feature
< NUM_X86_FEATURES
);
187 BT_CLEAR((ulong_t
*)featureset
, feature
);
191 compare_x86_featureset(void *setA
, void *setB
)
194 * We assume that the unused bits of the bitmap are always zero.
196 if (memcmp(setA
, setB
, BT_SIZEOFMAP(NUM_X86_FEATURES
)) == 0) {
204 print_x86_featureset(void *featureset
)
208 for (i
= 0; i
< NUM_X86_FEATURES
; i
++) {
209 if (is_x86_feature(featureset
, i
)) {
210 cmn_err(CE_CONT
, "?x86_feature: %s\n",
211 x86_feature_names
[i
]);
216 static size_t xsave_state_size
= 0;
217 uint64_t xsave_bv_all
= (XFEATURE_LEGACY_FP
| XFEATURE_SSE
);
218 boolean_t xsave_force_disable
= B_FALSE
;
221 * This is set to platform type we are running on.
223 static int platform_type
= -1;
227 * Variable to patch if hypervisor platform detection needs to be
228 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
230 int enable_platform_detection
= 1;
234 * monitor/mwait info.
236 * size_actual and buf_actual are the real address and size allocated to get
237 * proper mwait_buf alignement. buf_actual and size_actual should be passed
238 * to kmem_free(). Currently kmem_alloc() and mwait happen to both use
239 * processor cache-line alignment, but this is not guarantied in the furture.
242 size_t mon_min
; /* min size to avoid missed wakeups */
243 size_t mon_max
; /* size to avoid false wakeups */
244 size_t size_actual
; /* size actually allocated */
245 void *buf_actual
; /* memory actually allocated */
246 uint32_t support
; /* processor support of monitor/mwait */
250 * xsave/xrestor info.
252 * This structure contains HW feature bits and size of the xsave save area.
253 * Note: the kernel will use the maximum size required for all hardware
254 * features. It is not optimize for potential memory savings if features at
255 * the end of the save area are not enabled.
258 uint32_t xsav_hw_features_low
; /* Supported HW features */
259 uint32_t xsav_hw_features_high
; /* Supported HW features */
260 size_t xsav_max_size
; /* max size save area for HW features */
261 size_t ymm_size
; /* AVX: size of ymm save area */
262 size_t ymm_offset
; /* AVX: offset for ymm save area */
267 * These constants determine how many of the elements of the
268 * cpuid we cache in the cpuid_info data structure; the
269 * remaining elements are accessible via the cpuid instruction.
272 #define NMAX_CPI_STD 6 /* eax = 0 .. 5 */
273 #define NMAX_CPI_EXTD 0x1f /* eax = 0x80000000 .. 0x8000001e */
276 * Some terminology needs to be explained:
277 * - Socket: Something that can be plugged into a motherboard.
278 * - Package: Same as socket
279 * - Chip: Same as socket. Note that AMD's documentation uses term "chip"
280 * differently: there, chip is the same as processor node (below)
281 * - Processor node: Some AMD processors have more than one
282 * "subprocessor" embedded in a package. These subprocessors (nodes)
283 * are fully-functional processors themselves with cores, caches,
284 * memory controllers, PCI configuration spaces. They are connected
285 * inside the package with Hypertransport links. On single-node
286 * processors, processor node is equivalent to chip/socket/package.
287 * - Compute Unit: Some AMD processors pair cores in "compute units" that
288 * share the FPU and the I$ and L2 caches.
292 uint_t cpi_pass
; /* last pass completed */
294 * standard function information
296 uint_t cpi_maxeax
; /* fn 0: %eax */
297 char cpi_vendorstr
[13]; /* fn 0: %ebx:%ecx:%edx */
298 uint_t cpi_vendor
; /* enum of cpi_vendorstr */
300 uint_t cpi_family
; /* fn 1: extended family */
301 uint_t cpi_model
; /* fn 1: extended model */
302 uint_t cpi_step
; /* fn 1: stepping */
303 chipid_t cpi_chipid
; /* fn 1: %ebx: Intel: chip # */
304 /* AMD: package/socket # */
305 uint_t cpi_brandid
; /* fn 1: %ebx: brand ID */
306 int cpi_clogid
; /* fn 1: %ebx: thread # */
307 uint_t cpi_ncpu_per_chip
; /* fn 1: %ebx: logical cpu count */
308 uint8_t cpi_cacheinfo
[16]; /* fn 2: intel-style cache desc */
309 uint_t cpi_ncache
; /* fn 2: number of elements */
310 uint_t cpi_ncpu_shr_last_cache
; /* fn 4: %eax: ncpus sharing cache */
311 id_t cpi_last_lvl_cacheid
; /* fn 4: %eax: derived cache id */
312 uint_t cpi_std_4_size
; /* fn 4: number of fn 4 elements */
313 struct cpuid_regs
**cpi_std_4
; /* fn 4: %ecx == 0 .. fn4_size */
314 struct cpuid_regs cpi_std
[NMAX_CPI_STD
]; /* 0 .. 5 */
316 * extended function information
318 uint_t cpi_xmaxeax
; /* fn 0x80000000: %eax */
319 char cpi_brandstr
[49]; /* fn 0x8000000[234] */
320 uint8_t cpi_pabits
; /* fn 0x80000006: %eax */
321 uint8_t cpi_vabits
; /* fn 0x80000006: %eax */
322 struct cpuid_regs cpi_extd
[NMAX_CPI_EXTD
]; /* 0x800000XX */
324 id_t cpi_coreid
; /* same coreid => strands share core */
325 int cpi_pkgcoreid
; /* core number within single package */
326 uint_t cpi_ncore_per_chip
; /* AMD: fn 0x80000008: %ecx[7-0] */
327 /* Intel: fn 4: %eax[31-26] */
329 * supported feature information
331 uint32_t cpi_support
[5];
332 #define STD_EDX_FEATURES 0
333 #define AMD_EDX_FEATURES 1
334 #define TM_EDX_FEATURES 2
335 #define STD_ECX_FEATURES 3
336 #define AMD_ECX_FEATURES 4
338 * Synthesized information, where known.
340 uint32_t cpi_chiprev
; /* See X86_CHIPREV_* in x86_archext.h */
341 const char *cpi_chiprevstr
; /* May be NULL if chiprev unknown */
342 uint32_t cpi_socket
; /* Chip package/socket type */
344 struct mwait_info cpi_mwait
; /* fn 5: monitor/mwait info */
346 uint_t cpi_procnodeid
; /* AMD: nodeID on HT, Intel: chipid */
347 uint_t cpi_procnodes_per_pkg
; /* AMD: # of nodes in the package */
349 uint_t cpi_compunitid
; /* AMD: ComputeUnit ID, Intel: coreid */
350 uint_t cpi_cores_per_compunit
; /* AMD: # of cores in the ComputeUnit */
352 struct xsave_info cpi_xsave
; /* fn D: xsave/xrestor info */
356 static struct cpuid_info cpuid_info0
;
359 * These bit fields are defined by the Intel Application Note AP-485
360 * "Intel Processor Identification and the CPUID Instruction"
362 #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
363 #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
364 #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
365 #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
366 #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
367 #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
369 #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx)
370 #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx)
371 #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx)
372 #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx)
374 #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
375 #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
376 #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
377 #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
379 #define CPI_MAXEAX_MAX 0x100 /* sanity control */
380 #define CPI_XMAXEAX_MAX 0x80000100
381 #define CPI_FN4_ECX_MAX 0x20 /* sanity: max fn 4 levels */
382 #define CPI_FNB_ECX_MAX 0x20 /* sanity: max fn B levels */
385 * Function 4 (Deterministic Cache Parameters) macros
386 * Defined by Intel Application Note AP-485
388 #define CPI_NUM_CORES(regs) BITX((regs)->cp_eax, 31, 26)
389 #define CPI_NTHR_SHR_CACHE(regs) BITX((regs)->cp_eax, 25, 14)
390 #define CPI_FULL_ASSOC_CACHE(regs) BITX((regs)->cp_eax, 9, 9)
391 #define CPI_SELF_INIT_CACHE(regs) BITX((regs)->cp_eax, 8, 8)
392 #define CPI_CACHE_LVL(regs) BITX((regs)->cp_eax, 7, 5)
393 #define CPI_CACHE_TYPE(regs) BITX((regs)->cp_eax, 4, 0)
394 #define CPI_CPU_LEVEL_TYPE(regs) BITX((regs)->cp_ecx, 15, 8)
396 #define CPI_CACHE_WAYS(regs) BITX((regs)->cp_ebx, 31, 22)
397 #define CPI_CACHE_PARTS(regs) BITX((regs)->cp_ebx, 21, 12)
398 #define CPI_CACHE_COH_LN_SZ(regs) BITX((regs)->cp_ebx, 11, 0)
400 #define CPI_CACHE_SETS(regs) BITX((regs)->cp_ecx, 31, 0)
402 #define CPI_PREFCH_STRIDE(regs) BITX((regs)->cp_edx, 9, 0)
406 * A couple of shorthand macros to identify "later" P6-family chips
407 * like the Pentium M and Core. First, the "older" P6-based stuff
408 * (loosely defined as "pre-Pentium-4"):
409 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
412 #define IS_LEGACY_P6(cpi) ( \
413 cpi->cpi_family == 6 && \
414 (cpi->cpi_model == 1 || \
415 cpi->cpi_model == 3 || \
416 cpi->cpi_model == 5 || \
417 cpi->cpi_model == 6 || \
418 cpi->cpi_model == 7 || \
419 cpi->cpi_model == 8 || \
420 cpi->cpi_model == 0xA || \
421 cpi->cpi_model == 0xB) \
424 /* A "new F6" is everything with family 6 that's not the above */
425 #define IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
427 /* Extended family/model support */
428 #define IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
429 cpi->cpi_family >= 0xf)
432 * Info for monitor/mwait idle loop.
434 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
435 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
437 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
438 * Documentation Updates" #33633, Rev 2.05, December 2006.
440 #define MWAIT_SUPPORT (0x00000001) /* mwait supported */
441 #define MWAIT_EXTENSIONS (0x00000002) /* extenstion supported */
442 #define MWAIT_ECX_INT_ENABLE (0x00000004) /* ecx 1 extension supported */
443 #define MWAIT_SUPPORTED(cpi) ((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
444 #define MWAIT_INT_ENABLE(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x2)
445 #define MWAIT_EXTENSION(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x1)
446 #define MWAIT_SIZE_MIN(cpi) BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
447 #define MWAIT_SIZE_MAX(cpi) BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
449 * Number of sub-cstates for a given c-state.
451 #define MWAIT_NUM_SUBC_STATES(cpi, c_state) \
452 BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
455 * XSAVE leaf 0xD enumeration
457 #define CPUID_LEAFD_2_YMM_OFFSET 576
458 #define CPUID_LEAFD_2_YMM_SIZE 256
461 * Functions we consune from cpuid_subr.c; don't publish these in a header
462 * file to try and keep people using the expected cpuid_* interfaces.
464 extern uint32_t _cpuid_skt(uint_t
, uint_t
, uint_t
, uint_t
);
465 extern const char *_cpuid_sktstr(uint_t
, uint_t
, uint_t
, uint_t
);
466 extern uint32_t _cpuid_chiprev(uint_t
, uint_t
, uint_t
, uint_t
);
467 extern const char *_cpuid_chiprevstr(uint_t
, uint_t
, uint_t
, uint_t
);
468 extern uint_t
_cpuid_vendorstr_to_vendorcode(char *);
471 * Apply up various platform-dependent restrictions where the
472 * underlying platform restrictions mean the CPU can be marked
473 * as less capable than its cpuid instruction would imply.
477 platform_cpuid_mangle(uint_t vendor
, uint32_t eax
, struct cpuid_regs
*cp
)
481 uint32_t mcamask
= DOMAIN_IS_INITDOMAIN(xen_info
) ?
482 0 : CPUID_INTC_EDX_MCA
;
486 CPUID_INTC_EDX_VME
| CPUID_INTC_EDX_DE
|
487 CPUID_INTC_EDX_SEP
| CPUID_INTC_EDX_MTRR
|
488 CPUID_INTC_EDX_PGE
| CPUID_INTC_EDX_PAT
|
489 CPUID_AMD_EDX_SYSC
| CPUID_INTC_EDX_SEP
|
490 CPUID_INTC_EDX_PSE36
| CPUID_INTC_EDX_HTT
);
496 ~(CPUID_AMD_EDX_PSE
|
497 CPUID_INTC_EDX_VME
| CPUID_INTC_EDX_DE
|
498 CPUID_AMD_EDX_MTRR
| CPUID_AMD_EDX_PGE
|
499 CPUID_AMD_EDX_PAT
| CPUID_AMD_EDX_PSE36
|
500 CPUID_AMD_EDX_SYSC
| CPUID_INTC_EDX_SEP
|
502 cp
->cp_ecx
&= ~CPUID_AMD_ECX_CMP_LGCY
;
509 case X86_VENDOR_Intel
:
513 * Zero out the (ncores-per-chip - 1) field
515 cp
->cp_eax
&= 0x03fffffff;
525 cp
->cp_ecx
&= ~CPUID_AMD_ECX_CR8D
;
530 * Zero out the (ncores-per-chip - 1) field
532 cp
->cp_ecx
&= 0xffffff00;
543 #define platform_cpuid_mangle(vendor, eax, cp) /* nothing */
547 * Some undocumented ways of patching the results of the cpuid
548 * instruction to permit running Solaris 10 on future cpus that
549 * we don't currently support. Could be set to non-zero values
550 * via settings in eeprom.
553 uint32_t cpuid_feature_ecx_include
;
554 uint32_t cpuid_feature_ecx_exclude
;
555 uint32_t cpuid_feature_edx_include
;
556 uint32_t cpuid_feature_edx_exclude
;
559 * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
562 cpuid_alloc_space(cpu_t
*cpu
)
565 * By convention, cpu0 is the boot cpu, which is set up
566 * before memory allocation is available. All other cpus get
567 * their cpuid_info struct allocated here.
569 ASSERT(cpu
->cpu_id
!= 0);
570 ASSERT(cpu
->cpu_m
.mcpu_cpi
== NULL
);
571 cpu
->cpu_m
.mcpu_cpi
=
572 kmem_zalloc(sizeof (*cpu
->cpu_m
.mcpu_cpi
), KM_SLEEP
);
576 cpuid_free_space(cpu_t
*cpu
)
578 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
582 ASSERT(cpi
!= &cpuid_info0
);
585 * Free up any function 4 related dynamic storage
587 for (i
= 1; i
< cpi
->cpi_std_4_size
; i
++)
588 kmem_free(cpi
->cpi_std_4
[i
], sizeof (struct cpuid_regs
));
589 if (cpi
->cpi_std_4_size
> 0)
590 kmem_free(cpi
->cpi_std_4
,
591 cpi
->cpi_std_4_size
* sizeof (struct cpuid_regs
*));
593 kmem_free(cpi
, sizeof (*cpi
));
594 cpu
->cpu_m
.mcpu_cpi
= NULL
;
599 * Determine the type of the underlying platform. This is used to customize
600 * initialization of various subsystems (e.g. TSC). determine_platform() must
601 * only ever be called once to prevent two processors from seeing different
602 * values of platform_type. Must be called before cpuid_pass1(), the earliest
603 * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
606 determine_platform(void)
608 struct cpuid_regs cp
;
611 char *hvstr
= (char *)regs
;
613 ASSERT(platform_type
== -1);
615 platform_type
= HW_NATIVE
;
617 if (!enable_platform_detection
)
621 * If Hypervisor CPUID bit is set, try to determine hypervisor
622 * vendor signature, and set platform type accordingly.
625 * http://lkml.org/lkml/2008/10/1/246
626 * http://kb.vmware.com/kb/1009458
629 (void) __cpuid_insn(&cp
);
630 if ((cp
.cp_ecx
& CPUID_INTC_ECX_HV
) != 0) {
631 cp
.cp_eax
= 0x40000000;
632 (void) __cpuid_insn(&cp
);
637 if (strcmp(hvstr
, HVSIG_XEN_HVM
) == 0) {
638 platform_type
= HW_XEN_HVM
;
641 if (strcmp(hvstr
, HVSIG_VMWARE
) == 0) {
642 platform_type
= HW_VMWARE
;
645 if (strcmp(hvstr
, HVSIG_KVM
) == 0) {
646 platform_type
= HW_KVM
;
649 if (strcmp(hvstr
, HVSIG_MICROSOFT
) == 0)
650 platform_type
= HW_MICROSOFT
;
653 * Check older VMware hardware versions. VMware hypervisor is
654 * detected by performing an IN operation to VMware hypervisor
655 * port and checking that value returned in %ebx is VMware
656 * hypervisor magic value.
658 * References: http://kb.vmware.com/kb/1009458
660 vmware_port(VMWARE_HVCMD_GETVERSION
, regs
);
661 if (regs
[1] == VMWARE_HVMAGIC
) {
662 platform_type
= HW_VMWARE
;
668 * Check Xen hypervisor. In a fully virtualized domain,
669 * Xen's pseudo-cpuid function returns a string representing the
670 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
671 * supported cpuid function. We need at least a (base + 2) leaf value
672 * to do what we want to do. Try different base values, since the
673 * hypervisor might use a different one depending on whether Hyper-V
674 * emulation is switched on by default or not.
676 for (base
= 0x40000000; base
< 0x40010000; base
+= 0x100) {
678 (void) __cpuid_insn(&cp
);
683 if (strcmp(hvstr
, HVSIG_XEN_HVM
) == 0 &&
684 cp
.cp_eax
>= (base
+ 2)) {
685 platform_type
&= ~HW_NATIVE
;
686 platform_type
|= HW_XEN_HVM
;
695 ASSERT(platform_type
!= -1);
696 return (platform_type
);
716 return (DOMAIN_IS_INITDOMAIN(xen_info
));
722 cpuid_intel_getids(cpu_t
*cpu
, void *feature
)
725 uint_t chipid_shift
= 0;
726 uint_t coreid_shift
= 0;
727 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
729 for (i
= 1; i
< cpi
->cpi_ncpu_per_chip
; i
<<= 1)
732 cpi
->cpi_chipid
= cpi
->cpi_apicid
>> chipid_shift
;
733 cpi
->cpi_clogid
= cpi
->cpi_apicid
& ((1 << chipid_shift
) - 1);
735 if (is_x86_feature(feature
, X86FSET_CMP
)) {
737 * Multi-core (and possibly multi-threaded)
740 uint_t ncpu_per_core
;
741 if (cpi
->cpi_ncore_per_chip
== 1)
742 ncpu_per_core
= cpi
->cpi_ncpu_per_chip
;
743 else if (cpi
->cpi_ncore_per_chip
> 1)
744 ncpu_per_core
= cpi
->cpi_ncpu_per_chip
/
745 cpi
->cpi_ncore_per_chip
;
747 * 8bit APIC IDs on dual core Pentiums
750 * +-----------------------+------+------+
751 * | Physical Package ID | MC | HT |
752 * +-----------------------+------+------+
753 * <------- chipid -------->
754 * <------- coreid --------------->
759 * Where the number of bits necessary to
760 * represent MC and HT fields together equals
761 * to the minimum number of bits necessary to
762 * store the value of cpi->cpi_ncpu_per_chip.
763 * Of those bits, the MC part uses the number
764 * of bits necessary to store the value of
765 * cpi->cpi_ncore_per_chip.
767 for (i
= 1; i
< ncpu_per_core
; i
<<= 1)
769 cpi
->cpi_coreid
= cpi
->cpi_apicid
>> coreid_shift
;
770 cpi
->cpi_pkgcoreid
= cpi
->cpi_clogid
>> coreid_shift
;
771 } else if (is_x86_feature(feature
, X86FSET_HTT
)) {
773 * Single-core multi-threaded processors.
775 cpi
->cpi_coreid
= cpi
->cpi_chipid
;
776 cpi
->cpi_pkgcoreid
= 0;
778 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
779 cpi
->cpi_compunitid
= cpi
->cpi_coreid
;
783 cpuid_amd_getids(cpu_t
*cpu
)
785 int i
, first_half
, coreidsz
;
786 uint32_t nb_caps_reg
;
788 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
789 struct cpuid_regs
*cp
;
792 * AMD CMP chips currently have a single thread per core.
794 * Since no two cpus share a core we must assign a distinct coreid
795 * per cpu, and we do this by using the cpu_id. This scheme does not,
796 * however, guarantee that sibling cores of a chip will have sequential
797 * coreids starting at a multiple of the number of cores per chip -
798 * that is usually the case, but if the ACPI MADT table is presented
799 * in a different order then we need to perform a few more gymnastics
802 * All processors in the system have the same number of enabled
803 * cores. Cores within a processor are always numbered sequentially
804 * from 0 regardless of how many or which are disabled, and there
805 * is no way for operating system to discover the real core id when some
808 * In family 0x15, the cores come in pairs called compute units. They
809 * share I$ and L2 caches and the FPU. Enumeration of this feature is
810 * simplified by the new topology extensions CPUID leaf, indicated by
811 * the X86 feature X86FSET_TOPOEXT.
814 cpi
->cpi_coreid
= cpu
->cpu_id
;
815 cpi
->cpi_compunitid
= cpu
->cpu_id
;
817 if (cpi
->cpi_xmaxeax
>= 0x80000008) {
819 coreidsz
= BITX((cpi
)->cpi_extd
[8].cp_ecx
, 15, 12);
822 * In AMD parlance chip is really a node while Solaris
823 * sees chip as equivalent to socket/package.
825 cpi
->cpi_ncore_per_chip
=
826 BITX((cpi
)->cpi_extd
[8].cp_ecx
, 7, 0) + 1;
828 /* Use legacy method */
829 for (i
= 1; i
< cpi
->cpi_ncore_per_chip
; i
<<= 1)
835 /* Assume single-core part */
836 cpi
->cpi_ncore_per_chip
= 1;
840 cpi
->cpi_clogid
= cpi
->cpi_pkgcoreid
=
841 cpi
->cpi_apicid
& ((1<<coreidsz
) - 1);
842 cpi
->cpi_ncpu_per_chip
= cpi
->cpi_ncore_per_chip
;
844 /* Get node ID, compute unit ID */
845 if (is_x86_feature(x86_featureset
, X86FSET_TOPOEXT
) &&
846 cpi
->cpi_xmaxeax
>= 0x8000001e) {
847 cp
= &cpi
->cpi_extd
[0x1e];
848 cp
->cp_eax
= 0x8000001e;
849 (void) __cpuid_insn(cp
);
851 cpi
->cpi_procnodes_per_pkg
= BITX(cp
->cp_ecx
, 10, 8) + 1;
852 cpi
->cpi_procnodeid
= BITX(cp
->cp_ecx
, 7, 0);
853 cpi
->cpi_cores_per_compunit
= BITX(cp
->cp_ebx
, 15, 8) + 1;
854 cpi
->cpi_compunitid
= BITX(cp
->cp_ebx
, 7, 0)
855 + (cpi
->cpi_ncore_per_chip
/ cpi
->cpi_cores_per_compunit
)
856 * (cpi
->cpi_procnodeid
/ cpi
->cpi_procnodes_per_pkg
);
857 } else if (cpi
->cpi_family
== 0xf || cpi
->cpi_family
>= 0x11) {
858 cpi
->cpi_procnodeid
= (cpi
->cpi_apicid
>> coreidsz
) & 7;
859 } else if (cpi
->cpi_family
== 0x10) {
861 * See if we are a multi-node processor.
862 * All processors in the system have the same number of nodes
864 nb_caps_reg
= pci_getl_func(0, 24, 3, 0xe8);
865 if ((cpi
->cpi_model
< 8) || BITX(nb_caps_reg
, 29, 29) == 0) {
867 cpi
->cpi_procnodeid
= BITX(cpi
->cpi_apicid
, 5,
872 * Multi-node revision D (2 nodes per package
875 cpi
->cpi_procnodes_per_pkg
= 2;
877 first_half
= (cpi
->cpi_pkgcoreid
<=
878 (cpi
->cpi_ncore_per_chip
/2 - 1));
880 if (cpi
->cpi_apicid
== cpi
->cpi_pkgcoreid
) {
882 cpi
->cpi_procnodeid
= (first_half
? 0 : 1);
886 /* NodeId[2:1] bits to use for reading F3xe8 */
887 node2_1
= BITX(cpi
->cpi_apicid
, 5, 4) << 1;
890 pci_getl_func(0, 24 + node2_1
, 3, 0xe8);
893 * Check IntNodeNum bit (31:30, but bit 31 is
894 * always 0 on dual-node processors)
896 if (BITX(nb_caps_reg
, 30, 30) == 0)
897 cpi
->cpi_procnodeid
= node2_1
+
900 cpi
->cpi_procnodeid
= node2_1
+
905 cpi
->cpi_procnodeid
= 0;
909 cpi
->cpi_procnodeid
/ cpi
->cpi_procnodes_per_pkg
;
913 * Setup XFeature_Enabled_Mask register. Required by xsave feature.
918 uint64_t flags
= XFEATURE_LEGACY_FP
;
920 ASSERT(is_x86_feature(x86_featureset
, X86FSET_XSAVE
));
922 if (is_x86_feature(x86_featureset
, X86FSET_SSE
))
923 flags
|= XFEATURE_SSE
;
925 if (is_x86_feature(x86_featureset
, X86FSET_AVX
))
926 flags
|= XFEATURE_AVX
;
928 set_xcr(XFEATURE_ENABLED_MASK
, flags
);
930 xsave_bv_all
= flags
;
934 cpuid_pass1(cpu_t
*cpu
, uchar_t
*featureset
)
936 uint32_t mask_ecx
, mask_edx
;
937 struct cpuid_info
*cpi
;
938 struct cpuid_regs
*cp
;
941 extern int idle_cpu_prefer_mwait
;
945 * Space statically allocated for BSP, ensure pointer is set
947 if (cpu
->cpu_id
== 0) {
948 if (cpu
->cpu_m
.mcpu_cpi
== NULL
)
949 cpu
->cpu_m
.mcpu_cpi
= &cpuid_info0
;
952 add_x86_feature(featureset
, X86FSET_CPUID
);
954 cpi
= cpu
->cpu_m
.mcpu_cpi
;
956 cp
= &cpi
->cpi_std
[0];
958 cpi
->cpi_maxeax
= __cpuid_insn(cp
);
960 uint32_t *iptr
= (uint32_t *)cpi
->cpi_vendorstr
;
961 *iptr
++ = cp
->cp_ebx
;
962 *iptr
++ = cp
->cp_edx
;
963 *iptr
++ = cp
->cp_ecx
;
964 *(char *)&cpi
->cpi_vendorstr
[12] = '\0';
967 cpi
->cpi_vendor
= _cpuid_vendorstr_to_vendorcode(cpi
->cpi_vendorstr
);
968 x86_vendor
= cpi
->cpi_vendor
; /* for compatibility */
971 * Limit the range in case of weird hardware
973 if (cpi
->cpi_maxeax
> CPI_MAXEAX_MAX
)
974 cpi
->cpi_maxeax
= CPI_MAXEAX_MAX
;
975 if (cpi
->cpi_maxeax
< 1)
978 cp
= &cpi
->cpi_std
[1];
980 (void) __cpuid_insn(cp
);
983 * Extract identifying constants for easy access.
985 cpi
->cpi_model
= CPI_MODEL(cpi
);
986 cpi
->cpi_family
= CPI_FAMILY(cpi
);
988 if (cpi
->cpi_family
== 0xf)
989 cpi
->cpi_family
+= CPI_FAMILY_XTD(cpi
);
992 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
993 * Intel, and presumably everyone else, uses model == 0xf, as
994 * one would expect (max value means possible overflow). Sigh.
997 switch (cpi
->cpi_vendor
) {
998 case X86_VENDOR_Intel
:
999 if (IS_EXTENDED_MODEL_INTEL(cpi
))
1000 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1002 case X86_VENDOR_AMD
:
1003 if (CPI_FAMILY(cpi
) == 0xf)
1004 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1007 if (cpi
->cpi_model
== 0xf)
1008 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1012 cpi
->cpi_step
= CPI_STEP(cpi
);
1013 cpi
->cpi_brandid
= CPI_BRANDID(cpi
);
1016 * *default* assumptions:
1017 * - believe %edx feature word
1018 * - ignore %ecx feature word
1019 * - 32-bit virtual and physical addressing
1021 mask_edx
= 0xffffffff;
1024 cpi
->cpi_pabits
= cpi
->cpi_vabits
= 32;
1026 switch (cpi
->cpi_vendor
) {
1027 case X86_VENDOR_Intel
:
1028 if (cpi
->cpi_family
== 5)
1029 x86_type
= X86_TYPE_P5
;
1030 else if (IS_LEGACY_P6(cpi
)) {
1031 x86_type
= X86_TYPE_P6
;
1032 pentiumpro_bug4046376
= 1;
1034 * Clear the SEP bit when it was set erroneously
1036 if (cpi
->cpi_model
< 3 && cpi
->cpi_step
< 3)
1037 cp
->cp_edx
&= ~CPUID_INTC_EDX_SEP
;
1038 } else if (IS_NEW_F6(cpi
) || cpi
->cpi_family
== 0xf) {
1039 x86_type
= X86_TYPE_P4
;
1041 * We don't currently depend on any of the %ecx
1042 * features until Prescott, so we'll only check
1043 * this from P4 onwards. We might want to revisit
1046 mask_ecx
= 0xffffffff;
1047 } else if (cpi
->cpi_family
> 0xf)
1048 mask_ecx
= 0xffffffff;
1050 * We don't support MONITOR/MWAIT if leaf 5 is not available
1051 * to obtain the monitor linesize.
1053 if (cpi
->cpi_maxeax
< 5)
1054 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1056 case X86_VENDOR_IntelClone
:
1059 case X86_VENDOR_AMD
:
1060 #if defined(OPTERON_ERRATUM_108)
1061 if (cpi
->cpi_family
== 0xf && cpi
->cpi_model
== 0xe) {
1062 cp
->cp_eax
= (0xf0f & cp
->cp_eax
) | 0xc0;
1063 cpi
->cpi_model
= 0xc;
1066 if (cpi
->cpi_family
== 5) {
1070 * These CPUs have an incomplete implementation
1071 * of MCA/MCE which we mask away.
1073 mask_edx
&= ~(CPUID_INTC_EDX_MCE
| CPUID_INTC_EDX_MCA
);
1076 * Model 0 uses the wrong (APIC) bit
1077 * to indicate PGE. Fix it here.
1079 if (cpi
->cpi_model
== 0) {
1080 if (cp
->cp_edx
& 0x200) {
1081 cp
->cp_edx
&= ~0x200;
1082 cp
->cp_edx
|= CPUID_INTC_EDX_PGE
;
1087 * Early models had problems w/ MMX; disable.
1089 if (cpi
->cpi_model
< 6)
1090 mask_edx
&= ~CPUID_INTC_EDX_MMX
;
1094 * For newer families, SSE3 and CX16, at least, are valid;
1097 if (cpi
->cpi_family
>= 0xf)
1098 mask_ecx
= 0xffffffff;
1100 * We don't support MONITOR/MWAIT if leaf 5 is not available
1101 * to obtain the monitor linesize.
1103 if (cpi
->cpi_maxeax
< 5)
1104 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1108 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1109 * processors. AMD does not intend MWAIT to be used in the cpu
1110 * idle loop on current and future processors. 10h and future
1111 * AMD processors use more power in MWAIT than HLT.
1112 * Pre-family-10h Opterons do not have the MWAIT instruction.
1114 idle_cpu_prefer_mwait
= 0;
1120 * workaround the NT workaround in CMS 4.1
1122 if (cpi
->cpi_family
== 5 && cpi
->cpi_model
== 4 &&
1123 (cpi
->cpi_step
== 2 || cpi
->cpi_step
== 3))
1124 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1126 case X86_VENDOR_Centaur
:
1128 * workaround the NT workarounds again
1130 if (cpi
->cpi_family
== 6)
1131 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1133 case X86_VENDOR_Cyrix
:
1135 * We rely heavily on the probing in locore
1136 * to actually figure out what parts, if any,
1137 * of the Cyrix cpuid instruction to believe.
1140 case X86_TYPE_CYRIX_486
:
1143 case X86_TYPE_CYRIX_6x86
:
1146 case X86_TYPE_CYRIX_6x86L
:
1151 case X86_TYPE_CYRIX_6x86MX
:
1154 CPUID_INTC_EDX_MSR
|
1155 CPUID_INTC_EDX_CX8
|
1156 CPUID_INTC_EDX_PGE
|
1157 CPUID_INTC_EDX_CMOV
|
1160 case X86_TYPE_CYRIX_GXm
:
1162 CPUID_INTC_EDX_MSR
|
1163 CPUID_INTC_EDX_CX8
|
1164 CPUID_INTC_EDX_CMOV
|
1167 case X86_TYPE_CYRIX_MediaGX
:
1169 case X86_TYPE_CYRIX_MII
:
1170 case X86_TYPE_VIA_CYRIX_III
:
1173 CPUID_INTC_EDX_TSC
|
1174 CPUID_INTC_EDX_MSR
|
1175 CPUID_INTC_EDX_CX8
|
1176 CPUID_INTC_EDX_PGE
|
1177 CPUID_INTC_EDX_CMOV
|
1188 * Do not support MONITOR/MWAIT under a hypervisor
1190 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1192 * Do not support XSAVE under a hypervisor for now
1194 xsave_force_disable
= B_TRUE
;
1198 if (xsave_force_disable
) {
1199 mask_ecx
&= ~CPUID_INTC_ECX_XSAVE
;
1200 mask_ecx
&= ~CPUID_INTC_ECX_AVX
;
1201 mask_ecx
&= ~CPUID_INTC_ECX_F16C
;
1205 * Now we've figured out the masks that determine
1206 * which bits we choose to believe, apply the masks
1207 * to the feature words, then map the kernel's view
1208 * of these feature words into its feature word.
1210 cp
->cp_edx
&= mask_edx
;
1211 cp
->cp_ecx
&= mask_ecx
;
1214 * apply any platform restrictions (we don't call this
1215 * immediately after __cpuid_insn here, because we need the
1216 * workarounds applied above first)
1218 platform_cpuid_mangle(cpi
->cpi_vendor
, 1, cp
);
1221 * fold in overrides from the "eeprom" mechanism
1223 cp
->cp_edx
|= cpuid_feature_edx_include
;
1224 cp
->cp_edx
&= ~cpuid_feature_edx_exclude
;
1226 cp
->cp_ecx
|= cpuid_feature_ecx_include
;
1227 cp
->cp_ecx
&= ~cpuid_feature_ecx_exclude
;
1229 if (cp
->cp_edx
& CPUID_INTC_EDX_PSE
) {
1230 add_x86_feature(featureset
, X86FSET_LARGEPAGE
);
1232 if (cp
->cp_edx
& CPUID_INTC_EDX_TSC
) {
1233 add_x86_feature(featureset
, X86FSET_TSC
);
1235 if (cp
->cp_edx
& CPUID_INTC_EDX_MSR
) {
1236 add_x86_feature(featureset
, X86FSET_MSR
);
1238 if (cp
->cp_edx
& CPUID_INTC_EDX_MTRR
) {
1239 add_x86_feature(featureset
, X86FSET_MTRR
);
1241 if (cp
->cp_edx
& CPUID_INTC_EDX_PGE
) {
1242 add_x86_feature(featureset
, X86FSET_PGE
);
1244 if (cp
->cp_edx
& CPUID_INTC_EDX_CMOV
) {
1245 add_x86_feature(featureset
, X86FSET_CMOV
);
1247 if (cp
->cp_edx
& CPUID_INTC_EDX_MMX
) {
1248 add_x86_feature(featureset
, X86FSET_MMX
);
1250 if ((cp
->cp_edx
& CPUID_INTC_EDX_MCE
) != 0 &&
1251 (cp
->cp_edx
& CPUID_INTC_EDX_MCA
) != 0) {
1252 add_x86_feature(featureset
, X86FSET_MCA
);
1254 if (cp
->cp_edx
& CPUID_INTC_EDX_PAE
) {
1255 add_x86_feature(featureset
, X86FSET_PAE
);
1257 if (cp
->cp_edx
& CPUID_INTC_EDX_CX8
) {
1258 add_x86_feature(featureset
, X86FSET_CX8
);
1260 if (cp
->cp_ecx
& CPUID_INTC_ECX_CX16
) {
1261 add_x86_feature(featureset
, X86FSET_CX16
);
1263 if (cp
->cp_edx
& CPUID_INTC_EDX_PAT
) {
1264 add_x86_feature(featureset
, X86FSET_PAT
);
1266 if (cp
->cp_edx
& CPUID_INTC_EDX_SEP
) {
1267 add_x86_feature(featureset
, X86FSET_SEP
);
1269 if (cp
->cp_edx
& CPUID_INTC_EDX_FXSR
) {
1271 * In our implementation, fxsave/fxrstor
1272 * are prerequisites before we'll even
1273 * try and do SSE things.
1275 if (cp
->cp_edx
& CPUID_INTC_EDX_SSE
) {
1276 add_x86_feature(featureset
, X86FSET_SSE
);
1278 if (cp
->cp_edx
& CPUID_INTC_EDX_SSE2
) {
1279 add_x86_feature(featureset
, X86FSET_SSE2
);
1281 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE3
) {
1282 add_x86_feature(featureset
, X86FSET_SSE3
);
1284 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSSE3
) {
1285 add_x86_feature(featureset
, X86FSET_SSSE3
);
1287 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE4_1
) {
1288 add_x86_feature(featureset
, X86FSET_SSE4_1
);
1290 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE4_2
) {
1291 add_x86_feature(featureset
, X86FSET_SSE4_2
);
1293 if (cp
->cp_ecx
& CPUID_INTC_ECX_AES
) {
1294 add_x86_feature(featureset
, X86FSET_AES
);
1296 if (cp
->cp_ecx
& CPUID_INTC_ECX_PCLMULQDQ
) {
1297 add_x86_feature(featureset
, X86FSET_PCLMULQDQ
);
1300 if (cp
->cp_ecx
& CPUID_INTC_ECX_XSAVE
) {
1301 add_x86_feature(featureset
, X86FSET_XSAVE
);
1303 /* We only test AVX when there is XSAVE */
1304 if (cp
->cp_ecx
& CPUID_INTC_ECX_AVX
) {
1305 add_x86_feature(featureset
,
1308 if (cp
->cp_ecx
& CPUID_INTC_ECX_F16C
)
1309 add_x86_feature(featureset
,
1314 if (cp
->cp_edx
& CPUID_INTC_EDX_DE
) {
1315 add_x86_feature(featureset
, X86FSET_DE
);
1318 if (cp
->cp_ecx
& CPUID_INTC_ECX_MON
) {
1321 * We require the CLFLUSH instruction for erratum workaround
1322 * to use MONITOR/MWAIT.
1324 if (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
) {
1325 cpi
->cpi_mwait
.support
|= MWAIT_SUPPORT
;
1326 add_x86_feature(featureset
, X86FSET_MWAIT
);
1328 extern int idle_cpu_assert_cflush_monitor
;
1331 * All processors we are aware of which have
1332 * MONITOR/MWAIT also have CLFLUSH.
1334 if (idle_cpu_assert_cflush_monitor
) {
1335 ASSERT((cp
->cp_ecx
& CPUID_INTC_ECX_MON
) &&
1336 (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
));
1342 if (cp
->cp_ecx
& CPUID_INTC_ECX_VMX
) {
1343 add_x86_feature(featureset
, X86FSET_VMX
);
1346 if (cp
->cp_ecx
& CPUID_INTC_ECX_RDRAND
)
1347 add_x86_feature(featureset
, X86FSET_RDRAND
);
1350 * Only need it first time, rest of the cpus would follow suit.
1351 * we only capture this for the bootcpu.
1353 if (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
) {
1354 add_x86_feature(featureset
, X86FSET_CLFSH
);
1355 x86_clflush_size
= (BITX(cp
->cp_ebx
, 15, 8) * 8);
1357 if (is_x86_feature(featureset
, X86FSET_PAE
))
1358 cpi
->cpi_pabits
= 36;
1361 * Hyperthreading configuration is slightly tricky on Intel
1362 * and pure clones, and even trickier on AMD.
1364 * (AMD chose to set the HTT bit on their CMP processors,
1365 * even though they're not actually hyperthreaded. Thus it
1366 * takes a bit more work to figure out what's really going
1367 * on ... see the handling of the CMP_LGCY bit below)
1369 if (cp
->cp_edx
& CPUID_INTC_EDX_HTT
) {
1370 cpi
->cpi_ncpu_per_chip
= CPI_CPU_COUNT(cpi
);
1371 if (cpi
->cpi_ncpu_per_chip
> 1)
1372 add_x86_feature(featureset
, X86FSET_HTT
);
1374 cpi
->cpi_ncpu_per_chip
= 1;
1378 * Work on the "extended" feature information, doing
1379 * some basic initialization for cpuid_pass2()
1382 switch (cpi
->cpi_vendor
) {
1383 case X86_VENDOR_Intel
:
1384 if (IS_NEW_F6(cpi
) || cpi
->cpi_family
>= 0xf)
1387 case X86_VENDOR_AMD
:
1388 if (cpi
->cpi_family
> 5 ||
1389 (cpi
->cpi_family
== 5 && cpi
->cpi_model
>= 1))
1392 case X86_VENDOR_Cyrix
:
1394 * Only these Cyrix CPUs are -known- to support
1395 * extended cpuid operations.
1397 if (x86_type
== X86_TYPE_VIA_CYRIX_III
||
1398 x86_type
== X86_TYPE_CYRIX_GXm
)
1401 case X86_VENDOR_Centaur
:
1409 cp
= &cpi
->cpi_extd
[0];
1410 cp
->cp_eax
= 0x80000000;
1411 cpi
->cpi_xmaxeax
= __cpuid_insn(cp
);
1414 if (cpi
->cpi_xmaxeax
& 0x80000000) {
1416 if (cpi
->cpi_xmaxeax
> CPI_XMAXEAX_MAX
)
1417 cpi
->cpi_xmaxeax
= CPI_XMAXEAX_MAX
;
1419 switch (cpi
->cpi_vendor
) {
1420 case X86_VENDOR_Intel
:
1421 case X86_VENDOR_AMD
:
1422 if (cpi
->cpi_xmaxeax
< 0x80000001)
1424 cp
= &cpi
->cpi_extd
[1];
1425 cp
->cp_eax
= 0x80000001;
1426 (void) __cpuid_insn(cp
);
1428 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
&&
1429 cpi
->cpi_family
== 5 &&
1430 cpi
->cpi_model
== 6 &&
1431 cpi
->cpi_step
== 6) {
1433 * K6 model 6 uses bit 10 to indicate SYSC
1434 * Later models use bit 11. Fix it here.
1436 if (cp
->cp_edx
& 0x400) {
1437 cp
->cp_edx
&= ~0x400;
1438 cp
->cp_edx
|= CPUID_AMD_EDX_SYSC
;
1442 platform_cpuid_mangle(cpi
->cpi_vendor
, 0x80000001, cp
);
1445 * Compute the additions to the kernel's feature word.
1447 if (cp
->cp_edx
& CPUID_AMD_EDX_NX
) {
1448 add_x86_feature(featureset
, X86FSET_NX
);
1452 * Regardless whether or not we boot 64-bit,
1453 * we should have a way to identify whether
1454 * the CPU is capable of running 64-bit.
1456 if (cp
->cp_edx
& CPUID_AMD_EDX_LM
) {
1457 add_x86_feature(featureset
, X86FSET_64
);
1460 #if defined(__amd64)
1461 /* 1 GB large page - enable only for 64 bit kernel */
1462 if (cp
->cp_edx
& CPUID_AMD_EDX_1GPG
) {
1463 add_x86_feature(featureset
, X86FSET_1GPG
);
1467 if ((cpi
->cpi_vendor
== X86_VENDOR_AMD
) &&
1468 (cpi
->cpi_std
[1].cp_edx
& CPUID_INTC_EDX_FXSR
) &&
1469 (cp
->cp_ecx
& CPUID_AMD_ECX_SSE4A
)) {
1470 add_x86_feature(featureset
, X86FSET_SSE4A
);
1474 * If both the HTT and CMP_LGCY bits are set,
1475 * then we're not actually HyperThreaded. Read
1476 * "AMD CPUID Specification" for more details.
1478 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
&&
1479 is_x86_feature(featureset
, X86FSET_HTT
) &&
1480 (cp
->cp_ecx
& CPUID_AMD_ECX_CMP_LGCY
)) {
1481 remove_x86_feature(featureset
, X86FSET_HTT
);
1482 add_x86_feature(featureset
, X86FSET_CMP
);
1484 #if defined(__amd64)
1486 * It's really tricky to support syscall/sysret in
1487 * the i386 kernel; we rely on sysenter/sysexit
1488 * instead. In the amd64 kernel, things are -way-
1491 if (cp
->cp_edx
& CPUID_AMD_EDX_SYSC
) {
1492 add_x86_feature(featureset
, X86FSET_ASYSC
);
1496 * While we're thinking about system calls, note
1497 * that AMD processors don't support sysenter
1498 * in long mode at all, so don't try to program them.
1500 if (x86_vendor
== X86_VENDOR_AMD
) {
1501 remove_x86_feature(featureset
, X86FSET_SEP
);
1504 if (cp
->cp_edx
& CPUID_AMD_EDX_TSCP
) {
1505 add_x86_feature(featureset
, X86FSET_TSCP
);
1508 if (cp
->cp_ecx
& CPUID_AMD_ECX_SVM
) {
1509 add_x86_feature(featureset
, X86FSET_SVM
);
1512 if (cp
->cp_ecx
& CPUID_AMD_ECX_TOPOEXT
) {
1513 add_x86_feature(featureset
, X86FSET_TOPOEXT
);
1521 * Get CPUID data about processor cores and hyperthreads.
1523 switch (cpi
->cpi_vendor
) {
1524 case X86_VENDOR_Intel
:
1525 if (cpi
->cpi_maxeax
>= 4) {
1526 cp
= &cpi
->cpi_std
[4];
1529 (void) __cpuid_insn(cp
);
1530 platform_cpuid_mangle(cpi
->cpi_vendor
, 4, cp
);
1533 case X86_VENDOR_AMD
:
1534 if (cpi
->cpi_xmaxeax
< 0x80000008)
1536 cp
= &cpi
->cpi_extd
[8];
1537 cp
->cp_eax
= 0x80000008;
1538 (void) __cpuid_insn(cp
);
1539 platform_cpuid_mangle(cpi
->cpi_vendor
, 0x80000008, cp
);
1542 * Virtual and physical address limits from
1543 * cpuid override previously guessed values.
1545 cpi
->cpi_pabits
= BITX(cp
->cp_eax
, 7, 0);
1546 cpi
->cpi_vabits
= BITX(cp
->cp_eax
, 15, 8);
1553 * Derive the number of cores per chip
1555 switch (cpi
->cpi_vendor
) {
1556 case X86_VENDOR_Intel
:
1557 if (cpi
->cpi_maxeax
< 4) {
1558 cpi
->cpi_ncore_per_chip
= 1;
1561 cpi
->cpi_ncore_per_chip
=
1562 BITX((cpi
)->cpi_std
[4].cp_eax
, 31, 26) + 1;
1565 case X86_VENDOR_AMD
:
1566 if (cpi
->cpi_xmaxeax
< 0x80000008) {
1567 cpi
->cpi_ncore_per_chip
= 1;
1571 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
1572 * 1 less than the number of physical cores on
1573 * the chip. In family 0x10 this value can
1574 * be affected by "downcoring" - it reflects
1575 * 1 less than the number of cores actually
1576 * enabled on this node.
1578 cpi
->cpi_ncore_per_chip
=
1579 BITX((cpi
)->cpi_extd
[8].cp_ecx
, 7, 0) + 1;
1583 cpi
->cpi_ncore_per_chip
= 1;
1588 * Get CPUID data about TSC Invariance in Deep C-State.
1590 switch (cpi
->cpi_vendor
) {
1591 case X86_VENDOR_Intel
:
1592 if (cpi
->cpi_maxeax
>= 7) {
1593 cp
= &cpi
->cpi_extd
[7];
1594 cp
->cp_eax
= 0x80000007;
1596 (void) __cpuid_insn(cp
);
1603 cpi
->cpi_ncore_per_chip
= 1;
1607 * If more than one core, then this processor is CMP.
1609 if (cpi
->cpi_ncore_per_chip
> 1) {
1610 add_x86_feature(featureset
, X86FSET_CMP
);
1614 * If the number of cores is the same as the number
1615 * of CPUs, then we cannot have HyperThreading.
1617 if (cpi
->cpi_ncpu_per_chip
== cpi
->cpi_ncore_per_chip
) {
1618 remove_x86_feature(featureset
, X86FSET_HTT
);
1621 cpi
->cpi_apicid
= CPI_APIC_ID(cpi
);
1622 cpi
->cpi_procnodes_per_pkg
= 1;
1623 cpi
->cpi_cores_per_compunit
= 1;
1624 if (is_x86_feature(featureset
, X86FSET_HTT
) == B_FALSE
&&
1625 is_x86_feature(featureset
, X86FSET_CMP
) == B_FALSE
) {
1627 * Single-core single-threaded processors.
1629 cpi
->cpi_chipid
= -1;
1630 cpi
->cpi_clogid
= 0;
1631 cpi
->cpi_coreid
= cpu
->cpu_id
;
1632 cpi
->cpi_pkgcoreid
= 0;
1633 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
)
1634 cpi
->cpi_procnodeid
= BITX(cpi
->cpi_apicid
, 3, 0);
1636 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
1637 } else if (cpi
->cpi_ncpu_per_chip
> 1) {
1638 if (cpi
->cpi_vendor
== X86_VENDOR_Intel
)
1639 cpuid_intel_getids(cpu
, featureset
);
1640 else if (cpi
->cpi_vendor
== X86_VENDOR_AMD
)
1641 cpuid_amd_getids(cpu
);
1644 * All other processors are currently
1645 * assumed to have single cores.
1647 cpi
->cpi_coreid
= cpi
->cpi_chipid
;
1648 cpi
->cpi_pkgcoreid
= 0;
1649 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
1650 cpi
->cpi_compunitid
= cpi
->cpi_chipid
;
1655 * Synthesize chip "revision" and socket type
1657 cpi
->cpi_chiprev
= _cpuid_chiprev(cpi
->cpi_vendor
, cpi
->cpi_family
,
1658 cpi
->cpi_model
, cpi
->cpi_step
);
1659 cpi
->cpi_chiprevstr
= _cpuid_chiprevstr(cpi
->cpi_vendor
,
1660 cpi
->cpi_family
, cpi
->cpi_model
, cpi
->cpi_step
);
1661 cpi
->cpi_socket
= _cpuid_skt(cpi
->cpi_vendor
, cpi
->cpi_family
,
1662 cpi
->cpi_model
, cpi
->cpi_step
);
1669 * Make copies of the cpuid table entries we depend on, in
1670 * part for ease of parsing now, in part so that we have only
1671 * one place to correct any of it, in part for ease of
1672 * later export to userland, and in part so we can look at
1673 * this stuff in a crash dump.
1678 cpuid_pass2(cpu_t
*cpu
)
1682 struct cpuid_regs
*cp
;
1685 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
1687 ASSERT(cpi
->cpi_pass
== 1);
1689 if (cpi
->cpi_maxeax
< 1)
1692 if ((nmax
= cpi
->cpi_maxeax
+ 1) > NMAX_CPI_STD
)
1693 nmax
= NMAX_CPI_STD
;
1695 * (We already handled n == 0 and n == 1 in pass 1)
1697 for (n
= 2, cp
= &cpi
->cpi_std
[2]; n
< nmax
; n
++, cp
++) {
1701 * CPUID function 4 expects %ecx to be initialized
1702 * with an index which indicates which cache to return
1703 * information about. The OS is expected to call function 4
1704 * with %ecx set to 0, 1, 2, ... until it returns with
1705 * EAX[4:0] set to 0, which indicates there are no more
1708 * Here, populate cpi_std[4] with the information returned by
1709 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1710 * when dynamic memory allocation becomes available.
1712 * Note: we need to explicitly initialize %ecx here, since
1713 * function 4 may have been previously invoked.
1718 (void) __cpuid_insn(cp
);
1719 platform_cpuid_mangle(cpi
->cpi_vendor
, n
, cp
);
1723 * "the lower 8 bits of the %eax register
1724 * contain a value that identifies the number
1725 * of times the cpuid [instruction] has to be
1726 * executed to obtain a complete image of the
1727 * processor's caching systems."
1729 * How *do* they make this stuff up?
1731 cpi
->cpi_ncache
= sizeof (*cp
) *
1732 BITX(cp
->cp_eax
, 7, 0);
1733 if (cpi
->cpi_ncache
== 0)
1735 cpi
->cpi_ncache
--; /* skip count byte */
1738 * Well, for now, rather than attempt to implement
1739 * this slightly dubious algorithm, we just look
1740 * at the first 15 ..
1742 if (cpi
->cpi_ncache
> (sizeof (*cp
) - 1))
1743 cpi
->cpi_ncache
= sizeof (*cp
) - 1;
1745 dp
= cpi
->cpi_cacheinfo
;
1746 if (BITX(cp
->cp_eax
, 31, 31) == 0) {
1747 uint8_t *p
= (void *)&cp
->cp_eax
;
1748 for (i
= 1; i
< 4; i
++)
1752 if (BITX(cp
->cp_ebx
, 31, 31) == 0) {
1753 uint8_t *p
= (void *)&cp
->cp_ebx
;
1754 for (i
= 0; i
< 4; i
++)
1758 if (BITX(cp
->cp_ecx
, 31, 31) == 0) {
1759 uint8_t *p
= (void *)&cp
->cp_ecx
;
1760 for (i
= 0; i
< 4; i
++)
1764 if (BITX(cp
->cp_edx
, 31, 31) == 0) {
1765 uint8_t *p
= (void *)&cp
->cp_edx
;
1766 for (i
= 0; i
< 4; i
++)
1772 case 3: /* Processor serial number, if PSN supported */
1775 case 4: /* Deterministic cache parameters */
1778 case 5: /* Monitor/Mwait parameters */
1783 * check cpi_mwait.support which was set in cpuid_pass1
1785 if (!(cpi
->cpi_mwait
.support
& MWAIT_SUPPORT
))
1789 * Protect ourself from insane mwait line size.
1790 * Workaround for incomplete hardware emulator(s).
1792 mwait_size
= (size_t)MWAIT_SIZE_MAX(cpi
);
1793 if (mwait_size
< sizeof (uint32_t) ||
1794 !ISP2(mwait_size
)) {
1796 cmn_err(CE_NOTE
, "Cannot handle cpu %d mwait "
1797 "size %ld", cpu
->cpu_id
, (long)mwait_size
);
1802 cpi
->cpi_mwait
.mon_min
= (size_t)MWAIT_SIZE_MIN(cpi
);
1803 cpi
->cpi_mwait
.mon_max
= mwait_size
;
1804 if (MWAIT_EXTENSION(cpi
)) {
1805 cpi
->cpi_mwait
.support
|= MWAIT_EXTENSIONS
;
1806 if (MWAIT_INT_ENABLE(cpi
))
1807 cpi
->cpi_mwait
.support
|=
1808 MWAIT_ECX_INT_ENABLE
;
1817 if (cpi
->cpi_maxeax
>= 0xB && cpi
->cpi_vendor
== X86_VENDOR_Intel
) {
1818 struct cpuid_regs regs
;
1822 cp
->cp_edx
= cp
->cp_ebx
= cp
->cp_ecx
= 0;
1824 (void) __cpuid_insn(cp
);
1827 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
1828 * indicates that the extended topology enumeration leaf is
1833 uint_t coreid_shift
= 0;
1834 uint_t ncpu_per_core
= 1;
1835 uint_t chipid_shift
= 0;
1836 uint_t ncpu_per_chip
= 1;
1840 for (i
= 0; i
< CPI_FNB_ECX_MAX
; i
++) {
1844 (void) __cpuid_insn(cp
);
1845 level
= CPI_CPU_LEVEL_TYPE(cp
);
1848 x2apic_id
= cp
->cp_edx
;
1849 coreid_shift
= BITX(cp
->cp_eax
, 4, 0);
1850 ncpu_per_core
= BITX(cp
->cp_ebx
, 15, 0);
1851 } else if (level
== 2) {
1852 x2apic_id
= cp
->cp_edx
;
1853 chipid_shift
= BITX(cp
->cp_eax
, 4, 0);
1854 ncpu_per_chip
= BITX(cp
->cp_ebx
, 15, 0);
1858 cpi
->cpi_apicid
= x2apic_id
;
1859 cpi
->cpi_ncpu_per_chip
= ncpu_per_chip
;
1860 cpi
->cpi_ncore_per_chip
= ncpu_per_chip
/
1862 cpi
->cpi_chipid
= x2apic_id
>> chipid_shift
;
1863 cpi
->cpi_clogid
= x2apic_id
& ((1 << chipid_shift
) - 1);
1864 cpi
->cpi_coreid
= x2apic_id
>> coreid_shift
;
1865 cpi
->cpi_pkgcoreid
= cpi
->cpi_clogid
>> coreid_shift
;
1868 /* Make cp NULL so that we don't stumble on others */
1875 if (cpi
->cpi_maxeax
>= 0xD) {
1876 struct cpuid_regs regs
;
1877 boolean_t cpuid_d_valid
= B_TRUE
;
1881 cp
->cp_edx
= cp
->cp_ebx
= cp
->cp_ecx
= 0;
1883 (void) __cpuid_insn(cp
);
1886 * Sanity checks for debug
1888 if ((cp
->cp_eax
& XFEATURE_LEGACY_FP
) == 0 ||
1889 (cp
->cp_eax
& XFEATURE_SSE
) == 0) {
1890 cpuid_d_valid
= B_FALSE
;
1893 cpi
->cpi_xsave
.xsav_hw_features_low
= cp
->cp_eax
;
1894 cpi
->cpi_xsave
.xsav_hw_features_high
= cp
->cp_edx
;
1895 cpi
->cpi_xsave
.xsav_max_size
= cp
->cp_ecx
;
1898 * If the hw supports AVX, get the size and offset in the save
1899 * area for the ymm state.
1901 if (cpi
->cpi_xsave
.xsav_hw_features_low
& XFEATURE_AVX
) {
1904 cp
->cp_edx
= cp
->cp_ebx
= 0;
1906 (void) __cpuid_insn(cp
);
1908 if (cp
->cp_ebx
!= CPUID_LEAFD_2_YMM_OFFSET
||
1909 cp
->cp_eax
!= CPUID_LEAFD_2_YMM_SIZE
) {
1910 cpuid_d_valid
= B_FALSE
;
1913 cpi
->cpi_xsave
.ymm_size
= cp
->cp_eax
;
1914 cpi
->cpi_xsave
.ymm_offset
= cp
->cp_ebx
;
1917 if (is_x86_feature(x86_featureset
, X86FSET_XSAVE
)) {
1918 xsave_state_size
= 0;
1919 } else if (cpuid_d_valid
) {
1920 xsave_state_size
= cpi
->cpi_xsave
.xsav_max_size
;
1922 /* Broken CPUID 0xD, probably in HVM */
1923 cmn_err(CE_WARN
, "cpu%d: CPUID.0xD returns invalid "
1924 "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1925 ", ymm_size = %d, ymm_offset = %d\n",
1926 cpu
->cpu_id
, cpi
->cpi_xsave
.xsav_hw_features_low
,
1927 cpi
->cpi_xsave
.xsav_hw_features_high
,
1928 (int)cpi
->cpi_xsave
.xsav_max_size
,
1929 (int)cpi
->cpi_xsave
.ymm_size
,
1930 (int)cpi
->cpi_xsave
.ymm_offset
);
1932 if (xsave_state_size
!= 0) {
1934 * This must be a non-boot CPU. We cannot
1935 * continue, because boot cpu has already
1938 ASSERT(cpu
->cpu_id
!= 0);
1939 cmn_err(CE_PANIC
, "cpu%d: we have already "
1940 "enabled XSAVE on boot cpu, cannot "
1941 "continue.", cpu
->cpu_id
);
1944 * Must be from boot CPU, OK to disable XSAVE.
1946 ASSERT(cpu
->cpu_id
== 0);
1947 remove_x86_feature(x86_featureset
,
1949 remove_x86_feature(x86_featureset
, X86FSET_AVX
);
1950 CPI_FEATURES_ECX(cpi
) &= ~CPUID_INTC_ECX_XSAVE
;
1951 CPI_FEATURES_ECX(cpi
) &= ~CPUID_INTC_ECX_AVX
;
1952 CPI_FEATURES_ECX(cpi
) &= ~CPUID_INTC_ECX_F16C
;
1953 xsave_force_disable
= B_TRUE
;
1959 if ((cpi
->cpi_xmaxeax
& 0x80000000) == 0)
1962 if ((nmax
= cpi
->cpi_xmaxeax
- 0x80000000 + 1) > NMAX_CPI_EXTD
)
1963 nmax
= NMAX_CPI_EXTD
;
1965 * Copy the extended properties, fixing them as we go.
1966 * (We already handled n == 0 and n == 1 in pass 1)
1968 iptr
= (void *)cpi
->cpi_brandstr
;
1969 for (n
= 2, cp
= &cpi
->cpi_extd
[2]; n
< nmax
; cp
++, n
++) {
1970 cp
->cp_eax
= 0x80000000 + n
;
1971 (void) __cpuid_insn(cp
);
1972 platform_cpuid_mangle(cpi
->cpi_vendor
, 0x80000000 + n
, cp
);
1978 * Extract the brand string
1980 *iptr
++ = cp
->cp_eax
;
1981 *iptr
++ = cp
->cp_ebx
;
1982 *iptr
++ = cp
->cp_ecx
;
1983 *iptr
++ = cp
->cp_edx
;
1986 switch (cpi
->cpi_vendor
) {
1987 case X86_VENDOR_AMD
:
1989 * The Athlon and Duron were the first
1990 * parts to report the sizes of the
1991 * TLB for large pages. Before then,
1992 * we don't trust the data.
1994 if (cpi
->cpi_family
< 6 ||
1995 (cpi
->cpi_family
== 6 &&
1996 cpi
->cpi_model
< 1))
2004 switch (cpi
->cpi_vendor
) {
2005 case X86_VENDOR_AMD
:
2007 * The Athlon and Duron were the first
2008 * AMD parts with L2 TLB's.
2009 * Before then, don't trust the data.
2011 if (cpi
->cpi_family
< 6 ||
2012 cpi
->cpi_family
== 6 &&
2014 cp
->cp_eax
= cp
->cp_ebx
= 0;
2016 * AMD Duron rev A0 reports L2
2017 * cache size incorrectly as 1K
2018 * when it is really 64K
2020 if (cpi
->cpi_family
== 6 &&
2021 cpi
->cpi_model
== 3 &&
2022 cpi
->cpi_step
== 0) {
2023 cp
->cp_ecx
&= 0xffff;
2024 cp
->cp_ecx
|= 0x400000;
2027 case X86_VENDOR_Cyrix
: /* VIA C3 */
2029 * VIA C3 processors are a bit messed
2030 * up w.r.t. encoding cache sizes in %ecx
2032 if (cpi
->cpi_family
!= 6)
2035 * model 7 and 8 were incorrectly encoded
2037 * xxx is model 8 really broken?
2039 if (cpi
->cpi_model
== 7 ||
2040 cpi
->cpi_model
== 8)
2042 BITX(cp
->cp_ecx
, 31, 24) << 16 |
2043 BITX(cp
->cp_ecx
, 23, 16) << 12 |
2044 BITX(cp
->cp_ecx
, 15, 8) << 8 |
2045 BITX(cp
->cp_ecx
, 7, 0);
2047 * model 9 stepping 1 has wrong associativity
2049 if (cpi
->cpi_model
== 9 && cpi
->cpi_step
== 1)
2050 cp
->cp_ecx
|= 8 << 12;
2052 case X86_VENDOR_Intel
:
2054 * Extended L2 Cache features function.
2055 * First appeared on Prescott.
2071 intel_cpubrand(const struct cpuid_info
*cpi
)
2075 if (!is_x86_feature(x86_featureset
, X86FSET_CPUID
) ||
2076 cpi
->cpi_maxeax
< 1 || cpi
->cpi_family
< 5)
2079 switch (cpi
->cpi_family
) {
2081 return ("Intel Pentium(r)");
2083 switch (cpi
->cpi_model
) {
2084 uint_t celeron
, xeon
;
2085 const struct cpuid_regs
*cp
;
2089 return ("Intel Pentium(r) Pro");
2092 return ("Intel Pentium(r) II");
2094 return ("Intel Celeron(r)");
2098 cp
= &cpi
->cpi_std
[2]; /* cache info */
2100 for (i
= 1; i
< 4; i
++) {
2103 tmp
= (cp
->cp_eax
>> (8 * i
)) & 0xff;
2106 if (tmp
>= 0x44 && tmp
<= 0x45)
2110 for (i
= 0; i
< 2; i
++) {
2113 tmp
= (cp
->cp_ebx
>> (8 * i
)) & 0xff;
2116 else if (tmp
>= 0x44 && tmp
<= 0x45)
2120 for (i
= 0; i
< 4; i
++) {
2123 tmp
= (cp
->cp_ecx
>> (8 * i
)) & 0xff;
2126 else if (tmp
>= 0x44 && tmp
<= 0x45)
2130 for (i
= 0; i
< 4; i
++) {
2133 tmp
= (cp
->cp_edx
>> (8 * i
)) & 0xff;
2136 else if (tmp
>= 0x44 && tmp
<= 0x45)
2141 return ("Intel Celeron(r)");
2143 return (cpi
->cpi_model
== 5 ?
2144 "Intel Pentium(r) II Xeon(tm)" :
2145 "Intel Pentium(r) III Xeon(tm)");
2146 return (cpi
->cpi_model
== 5 ?
2147 "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2148 "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2156 /* BrandID is present if the field is nonzero */
2157 if (cpi
->cpi_brandid
!= 0) {
2158 static const struct {
2162 { 0x1, "Intel(r) Celeron(r)" },
2163 { 0x2, "Intel(r) Pentium(r) III" },
2164 { 0x3, "Intel(r) Pentium(r) III Xeon(tm)" },
2165 { 0x4, "Intel(r) Pentium(r) III" },
2166 { 0x6, "Mobile Intel(r) Pentium(r) III" },
2167 { 0x7, "Mobile Intel(r) Celeron(r)" },
2168 { 0x8, "Intel(r) Pentium(r) 4" },
2169 { 0x9, "Intel(r) Pentium(r) 4" },
2170 { 0xa, "Intel(r) Celeron(r)" },
2171 { 0xb, "Intel(r) Xeon(tm)" },
2172 { 0xc, "Intel(r) Xeon(tm) MP" },
2173 { 0xe, "Mobile Intel(r) Pentium(r) 4" },
2174 { 0xf, "Mobile Intel(r) Celeron(r)" },
2175 { 0x11, "Mobile Genuine Intel(r)" },
2176 { 0x12, "Intel(r) Celeron(r) M" },
2177 { 0x13, "Mobile Intel(r) Celeron(r)" },
2178 { 0x14, "Intel(r) Celeron(r)" },
2179 { 0x15, "Mobile Genuine Intel(r)" },
2180 { 0x16, "Intel(r) Pentium(r) M" },
2181 { 0x17, "Mobile Intel(r) Celeron(r)" }
2183 uint_t btblmax
= sizeof (brand_tbl
) / sizeof (brand_tbl
[0]);
2186 sgn
= (cpi
->cpi_family
<< 8) |
2187 (cpi
->cpi_model
<< 4) | cpi
->cpi_step
;
2189 for (i
= 0; i
< btblmax
; i
++)
2190 if (brand_tbl
[i
].bt_bid
== cpi
->cpi_brandid
)
2193 if (sgn
== 0x6b1 && cpi
->cpi_brandid
== 3)
2194 return ("Intel(r) Celeron(r)");
2195 if (sgn
< 0xf13 && cpi
->cpi_brandid
== 0xb)
2196 return ("Intel(r) Xeon(tm) MP");
2197 if (sgn
< 0xf13 && cpi
->cpi_brandid
== 0xe)
2198 return ("Intel(r) Xeon(tm)");
2199 return (brand_tbl
[i
].bt_str
);
2207 amd_cpubrand(const struct cpuid_info
*cpi
)
2209 if (!is_x86_feature(x86_featureset
, X86FSET_CPUID
) ||
2210 cpi
->cpi_maxeax
< 1 || cpi
->cpi_family
< 5)
2211 return ("i486 compatible");
2213 switch (cpi
->cpi_family
) {
2215 switch (cpi
->cpi_model
) {
2222 return ("AMD-K5(r)");
2225 return ("AMD-K6(r)");
2227 return ("AMD-K6(r)-2");
2229 return ("AMD-K6(r)-III");
2231 return ("AMD (family 5)");
2234 switch (cpi
->cpi_model
) {
2236 return ("AMD-K7(tm)");
2240 return ("AMD Athlon(tm)");
2243 return ("AMD Duron(tm)");
2248 * Use the L2 cache size to distinguish
2250 return ((cpi
->cpi_extd
[6].cp_ecx
>> 16) >= 256 ?
2251 "AMD Athlon(tm)" : "AMD Duron(tm)");
2253 return ("AMD (family 6)");
2259 if (cpi
->cpi_family
== 0xf && cpi
->cpi_model
== 5 &&
2260 cpi
->cpi_brandid
!= 0) {
2261 switch (BITX(cpi
->cpi_brandid
, 7, 5)) {
2263 return ("AMD Opteron(tm) UP 1xx");
2265 return ("AMD Opteron(tm) DP 2xx");
2267 return ("AMD Opteron(tm) MP 8xx");
2269 return ("AMD Opteron(tm)");
static const char *
cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
	    type == X86_TYPE_CYRIX_486)
		return ("i486 compatible");

	switch (type) {
	case X86_TYPE_CYRIX_6x86:
		return ("Cyrix 6x86");
	case X86_TYPE_CYRIX_6x86L:
		return ("Cyrix 6x86L");
	case X86_TYPE_CYRIX_6x86MX:
		return ("Cyrix 6x86MX");
	case X86_TYPE_CYRIX_GXm:
		return ("Cyrix GXm");
	case X86_TYPE_CYRIX_MediaGX:
		return ("Cyrix MediaGX");
	case X86_TYPE_CYRIX_MII:
		return ("Cyrix M2");
	case X86_TYPE_VIA_CYRIX_III:
		return ("VIA Cyrix M3");
	default:
		/*
		 * Have another wild guess ..
		 */
		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
			return ("Cyrix 5x86");
		else if (cpi->cpi_family == 5) {
			switch (cpi->cpi_model) {
			case 2:
				return ("Cyrix 6x86");	/* Cyrix M1 */
			case 4:
				return ("Cyrix MediaGX");
			default:
				break;
			}
		} else if (cpi->cpi_family == 6) {
			switch (cpi->cpi_model) {
			case 0:
				return ("Cyrix 6x86MX"); /* Cyrix M2? */
			default:
				break;
			}
		}
		break;
	}

	return (NULL);
}
/*
 * This only gets called in the case that the CPU extended
 * feature brand string (0x80000002, 0x80000003, 0x80000004)
 * isn't available, or contains null bytes for some reason.
 */
static void
fabricate_brandstr(struct cpuid_info *cpi)
{
	const char *brand = NULL;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		brand = intel_cpubrand(cpi);
		break;
	case X86_VENDOR_AMD:
		brand = amd_cpubrand(cpi);
		break;
	case X86_VENDOR_Cyrix:
		brand = cyrix_cpubrand(cpi, x86_type);
		break;
	case X86_VENDOR_NexGen:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "NexGen Nx586";
		break;
	case X86_VENDOR_Centaur:
		if (cpi->cpi_family == 5)
			switch (cpi->cpi_model) {
			case 4:
				brand = "Centaur C6";
				break;
			case 8:
				brand = "Centaur C2";
				break;
			case 9:
				brand = "Centaur C3";
				break;
			default:
				break;
			}
		break;
	case X86_VENDOR_Rise:
		if (cpi->cpi_family == 5 &&
		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
			brand = "Rise mP6";
		break;
	case X86_VENDOR_SiS:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "SiS 55x";
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
			brand = "Transmeta Crusoe TM3x00 or TM5x00";
		break;
	case X86_VENDOR_NSC:
	case X86_VENDOR_UMC:
	default:
		break;
	}
	if (brand) {
		(void) strcpy((char *)cpi->cpi_brandstr, brand);
		return;
	}

	/*
	 * If all else fails ...
	 */
	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
}
/*
 * This routine is called just after kernel memory allocation
 * becomes available on cpu0, and as part of mp_startup() on
 * the other cpus.
 *
 * Fixup the brand string, and collect any information from cpuid
 * that requires dynamically allocated storage to represent.
 */
void
cpuid_pass3(cpu_t *cpu)
{
	int	i, max, shft, level, size;
	struct cpuid_regs regs;
	struct cpuid_regs *cp;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 2);

	/*
	 * Function 4: Deterministic cache parameters
	 *
	 * Take this opportunity to detect the number of threads
	 * sharing the last level cache, and construct a corresponding
	 * cache id. The respective cpuid_info members are initialized
	 * to the default case of "no last level cache sharing".
	 */
	cpi->cpi_ncpu_shr_last_cache = 1;
	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;

	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {

		/*
		 * Find the # of elements (size) returned by fn 4, and along
		 * the way detect last level cache sharing details.
		 */
		bzero(&regs, sizeof (regs));
		cp = &regs;
		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
			cp->cp_eax = 4;
			cp->cp_ecx = i;

			(void) __cpuid_insn(cp);

			if (CPI_CACHE_TYPE(cp) == 0)
				break;
			level = CPI_CACHE_LVL(cp);
			if (level > max) {
				max = level;
				cpi->cpi_ncpu_shr_last_cache =
				    CPI_NTHR_SHR_CACHE(cp) + 1;
			}
		}
		cpi->cpi_std_4_size = size = i;

		/*
		 * Allocate the cpi_std_4 array. The first element
		 * references the regs for fn 4, %ecx == 0, which
		 * cpuid_pass2() stashed in cpi->cpi_std[4].
		 */
		if (size > 0) {
			cpi->cpi_std_4 =
			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
			cpi->cpi_std_4[0] = &cpi->cpi_std[4];

			/*
			 * Allocate storage to hold the additional regs
			 * for function 4, %ecx == 1 .. cpi_std_4_size.
			 *
			 * The regs for fn 4, %ecx == 0 has already
			 * been allocated as indicated above.
			 */
			for (i = 1; i < size; i++) {
				cp = cpi->cpi_std_4[i] =
				    kmem_zalloc(sizeof (regs), KM_SLEEP);
				cp->cp_eax = 4;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
			}
		}
		/*
		 * Determine the number of bits needed to represent
		 * the number of CPUs sharing the last level cache.
		 *
		 * Shift off that number of bits from the APIC id to
		 * derive the cache id.
		 */
		shft = 0;
		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
			shft++;
		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
	}
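	/*
	 * For example, with 8 CPUs sharing the last level cache the
	 * loop above computes shft == 3, so a CPU with APIC id 0x1b
	 * gets cache id 0x1b >> 3 == 3.
	 */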
	/*
	 * Now fixup the brand string
	 */
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
		fabricate_brandstr(cpi);
	} else {

		/*
		 * If we successfully extracted a brand string from the cpuid
		 * instruction, clean it up by removing leading spaces and
		 * trailing nulls.
		 */
		if (cpi->cpi_brandstr[0]) {
			size_t maxlen = sizeof (cpi->cpi_brandstr);
			char *src, *dst;

			dst = src = (char *)cpi->cpi_brandstr;
			src[maxlen - 1] = '\0';
			/*
			 * strip leading spaces
			 */
			while (*src == ' ')
				src++;
			/*
			 * Remove any 'Genuine' or "Authentic" prefixes
			 */
			if (strncmp(src, "Genuine ", 8) == 0)
				src += 8;
			if (strncmp(src, "Authentic ", 10) == 0)
				src += 10;

			/*
			 * Now do an in-place copy.
			 * Map (R) to (r) and (TM) to (tm).
			 * The era of teletypes is long gone, and there's
			 * -really- no need to shout.
			 */
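			/*
			 * e.g. a raw "  Genuine Intel(R) CPU  " enters
			 * this loop as "Intel(R) CPU  " and leaves the
			 * full cleanup below as "Intel(r) CPU".
			 */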
			while (*src != '\0') {
				if (src[0] == '(') {
					if (strncmp(src + 1, "R)", 2) == 0) {
						(void) strncpy(dst, "(r)", 3);
						src += 3;
						dst += 3;
						continue;
					}
					if (strncmp(src + 1, "TM)", 3) == 0) {
						(void) strncpy(dst, "(tm)", 4);
						src += 4;
						dst += 4;
						continue;
					}
				}
				*dst++ = *src++;
			}
			*dst = '\0';

			/*
			 * Finally, remove any trailing spaces
			 */
			while (--dst > cpi->cpi_brandstr)
				if (*dst == ' ')
					*dst = '\0';
				else
					break;
		} else
			fabricate_brandstr(cpi);
	}
	cpi->cpi_pass = 3;
}
/*
 * This routine is called out of bind_hwcap() much later in the life
 * of the kernel (post_startup()). The job of this routine is to resolve
 * the hardware feature support and kernel support for those features into
 * what we're actually going to tell applications via the aux vector.
 */
void
cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
{
	struct cpuid_info *cpi;
	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 3);
	if (cpi->cpi_maxeax >= 1) {
		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];

		*edx = CPI_FEATURES_EDX(cpi);
		*ecx = CPI_FEATURES_ECX(cpi);

		/*
		 * [these require explicit kernel support]
		 */
		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
			*edx &= ~CPUID_INTC_EDX_SEP;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
			*edx &= ~CPUID_INTC_EDX_SSE2;

		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
			*edx &= ~CPUID_INTC_EDX_HTT;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
			*ecx &= ~CPUID_INTC_ECX_SSE3;

		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
			*ecx &= ~CPUID_INTC_ECX_SSSE3;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
		if (!is_x86_feature(x86_featureset, X86FSET_AES))
			*ecx &= ~CPUID_INTC_ECX_AES;
		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
			    CPUID_INTC_ECX_OSXSAVE);
		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
			*ecx &= ~CPUID_INTC_ECX_AVX;
		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
			*ecx &= ~CPUID_INTC_ECX_F16C;

		/*
		 * [no explicit support required beyond x87 fp context]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);

		/*
		 * Now map the supported feature vector to things that we
		 * think userland will care about.
		 */
		if (*edx & CPUID_INTC_EDX_SEP)
			hwcap_flags |= AV_386_SEP;
		if (*edx & CPUID_INTC_EDX_SSE)
			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
		if (*edx & CPUID_INTC_EDX_SSE2)
			hwcap_flags |= AV_386_SSE2;
		if (*ecx & CPUID_INTC_ECX_SSE3)
			hwcap_flags |= AV_386_SSE3;
		if (*ecx & CPUID_INTC_ECX_SSSE3)
			hwcap_flags |= AV_386_SSSE3;
		if (*ecx & CPUID_INTC_ECX_SSE4_1)
			hwcap_flags |= AV_386_SSE4_1;
		if (*ecx & CPUID_INTC_ECX_SSE4_2)
			hwcap_flags |= AV_386_SSE4_2;
		if (*ecx & CPUID_INTC_ECX_MOVBE)
			hwcap_flags |= AV_386_MOVBE;
		if (*ecx & CPUID_INTC_ECX_AES)
			hwcap_flags |= AV_386_AES;
		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
			hwcap_flags |= AV_386_PCLMULQDQ;
		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
			hwcap_flags |= AV_386_XSAVE;

			if (*ecx & CPUID_INTC_ECX_AVX) {
				hwcap_flags |= AV_386_AVX;
				if (*ecx & CPUID_INTC_ECX_F16C)
					hwcap_flags_2 |= AV_386_2_F16C;
			}
		}
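		/*
		 * Note that both XSAVE and OSXSAVE must be set above
		 * before AV_386_XSAVE is advertised: OSXSAVE reflects
		 * the kernel actually having enabled the xsave facility
		 * in CR4 (see xsave_setup_msr() below).
		 */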
		if (*ecx & CPUID_INTC_ECX_VMX)
			hwcap_flags |= AV_386_VMX;
		if (*ecx & CPUID_INTC_ECX_POPCNT)
			hwcap_flags |= AV_386_POPCNT;
		if (*edx & CPUID_INTC_EDX_FPU)
			hwcap_flags |= AV_386_FPU;
		if (*edx & CPUID_INTC_EDX_MMX)
			hwcap_flags |= AV_386_MMX;

		if (*edx & CPUID_INTC_EDX_TSC)
			hwcap_flags |= AV_386_TSC;
		if (*edx & CPUID_INTC_EDX_CX8)
			hwcap_flags |= AV_386_CX8;
		if (*edx & CPUID_INTC_EDX_CMOV)
			hwcap_flags |= AV_386_CMOV;
		if (*ecx & CPUID_INTC_ECX_CX16)
			hwcap_flags |= AV_386_CX16;

		if (*ecx & CPUID_INTC_ECX_RDRAND)
			hwcap_flags_2 |= AV_386_2_RDRAND;
	}
	if (cpi->cpi_xmaxeax < 0x80000001)
		goto pass4_done;

	switch (cpi->cpi_vendor) {
		struct cpuid_regs cp;
		uint32_t *edx, *ecx;

	case X86_VENDOR_Intel:
		/*
		 * Seems like Intel duplicated what was necessary
		 * here to make the initial crop of 64-bit OS's work.
		 * Hopefully, those are the only "extended" bits
		 * we'll ever have to worry about.
		 */
		/*FALLTHROUGH*/

	case X86_VENDOR_AMD:
		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];

		*edx = CPI_FEATURES_XTD_EDX(cpi);
		*ecx = CPI_FEATURES_XTD_ECX(cpi);

		/*
		 * [these features require explicit kernel support]
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			break;

		case X86_VENDOR_AMD:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
				*ecx &= ~CPUID_AMD_ECX_SSE4A;
			break;

		default:
			break;
		}
		/*
		 * [no explicit support required beyond
		 * x87 fp context and exception handlers]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_AMD_EDX_MMXamd |
			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);

		if (!is_x86_feature(x86_featureset, X86FSET_NX))
			*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
		*edx &= ~CPUID_AMD_EDX_LM;
#endif
		/*
		 * Now map the supported feature vector to
		 * things that we think userland will care about.
		 */
#if defined(__amd64)
		if (*edx & CPUID_AMD_EDX_SYSC)
			hwcap_flags |= AV_386_AMD_SYSC;
#endif
		if (*edx & CPUID_AMD_EDX_MMXamd)
			hwcap_flags |= AV_386_AMD_MMX;
		if (*edx & CPUID_AMD_EDX_3DNow)
			hwcap_flags |= AV_386_AMD_3DNow;
		if (*edx & CPUID_AMD_EDX_3DNowx)
			hwcap_flags |= AV_386_AMD_3DNowx;
		if (*ecx & CPUID_AMD_ECX_SVM)
			hwcap_flags |= AV_386_AMD_SVM;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_AMD:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			if (*ecx & CPUID_AMD_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			if (*ecx & CPUID_AMD_ECX_SSE4A)
				hwcap_flags |= AV_386_AMD_SSE4A;
			if (*ecx & CPUID_AMD_ECX_LZCNT)
				hwcap_flags |= AV_386_AMD_LZCNT;
			break;

		case X86_VENDOR_Intel:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			/*
			 * Intel uses a different bit in the same word.
			 */
			if (*ecx & CPUID_INTC_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			break;

		default:
			break;
		}
		break;
	case X86_VENDOR_TM:
		cp.cp_eax = 0x80860001;
		(void) __cpuid_insn(&cp);
		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
		break;

	default:
		break;
	}

pass4_done:
	cpi->cpi_pass = 4;
	if (hwcap_out != NULL) {
		hwcap_out[0] = hwcap_flags;
		hwcap_out[1] = hwcap_flags_2;
	}
}
/*
 * Simulate the cpuid instruction using the data we previously
 * captured about this CPU. We try our best to return the truth
 * about the hardware, independently of kernel support.
 */
uint32_t
cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
{
	struct cpuid_info *cpi;
	struct cpuid_regs *xcp;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 3));

	/*
	 * CPUID data is cached in two separate places: cpi_std for standard
	 * CPUID functions, and cpi_extd for extended CPUID functions.
	 */
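	/*
	 * e.g. leaf 2 is served from cpi_std[2] and leaf 0x80000006
	 * from cpi_extd[6]; a leaf outside both cached windows falls
	 * through to a live cpuid instruction below.
	 */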
	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
		xcp = &cpi->cpi_std[cp->cp_eax];
	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
	else
		/*
		 * The caller is asking for data from an input parameter which
		 * the kernel has not cached. In this case we go fetch from
		 * the hardware and return the data directly to the user.
		 */
		return (__cpuid_insn(cp));

	cp->cp_eax = xcp->cp_eax;
	cp->cp_ebx = xcp->cp_ebx;
	cp->cp_ecx = xcp->cp_ecx;
	cp->cp_edx = xcp->cp_edx;
	return (cp->cp_eax);
}
int
cpuid_checkpass(cpu_t *cpu, int pass)
{
	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
}
int
cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
{
	ASSERT(cpuid_checkpass(cpu, 3));

	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
}
int
cpuid_is_cmt(cpu_t *cpu)
{
	if (cpu == NULL)
		cpu = CPU;

	ASSERT(cpuid_checkpass(cpu, 1));

	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
}
/*
 * AMD and Intel both implement the 64-bit variant of the syscall
 * instruction (syscallq), so if there's -any- support for syscall,
 * cpuid currently says "yes, we support this".
 *
 * However, Intel decided to -not- implement the 32-bit variant of the
 * syscall instruction, so we provide a predicate to allow our caller
 * to test that subtlety here.
 *
 * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
 *	even in the case where the hardware would in fact support it.
 */
/*ARGSUSED*/
int
cpuid_syscall32_insn(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));

#if !defined(__xpv)
	if (cpu == NULL)
		cpu = CPU;

	{
		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
		    cpi->cpi_xmaxeax >= 0x80000001 &&
		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
			return (1);
	}
#endif

	return (0);
}
int
cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	static const char fmt[] =
	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
	static const char fmt_ht[] =
	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";

	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
		    cpi->cpi_family, cpi->cpi_model,
		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
	return (snprintf(s, n, fmt,
	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
	    cpi->cpi_family, cpi->cpi_model,
	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
}
const char *
cpuid_getvendorstr(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
}

uint_t
cpuid_getvendor(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
}

uint_t
cpuid_getfamily(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_family);
}

uint_t
cpuid_getmodel(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_model);
}

uint_t
cpuid_get_ncpu_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
}

uint_t
cpuid_get_ncore_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
}

uint_t
cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
}

id_t
cpuid_get_last_lvl_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_getstep(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_step);
}

uint_t
cpuid_getsig(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
}

uint32_t
cpuid_getchiprev(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
}

const char *
cpuid_getchiprevstr(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
}

uint32_t
cpuid_getsockettype(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
}
const char *
cpuid_getsocketstr(cpu_t *cpu)
{
	static const char *socketstr = NULL;
	struct cpuid_info *cpi;

	ASSERT(cpuid_checkpass(cpu, 1));
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Assume that socket types are the same across the system */
	if (socketstr == NULL)
		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
		    cpi->cpi_model, cpi->cpi_step);

	return (socketstr);
}
int
cpuid_get_chipid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
	return (cpu->cpu_id);
}

id_t
cpuid_get_coreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
}

int
cpuid_get_pkgcoreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
}

int
cpuid_get_clogid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
}

int
cpuid_get_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_get_procnodeid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
}

uint_t
cpuid_get_procnodes_per_pkg(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
}

uint_t
cpuid_get_compunitid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
}

uint_t
cpuid_get_cores_per_compunit(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
}
/*ARGSUSED*/
int
cpuid_have_cr8access(cpu_t *cpu)
{
#if defined(__amd64)
	return (1);
#else
	struct cpuid_info *cpi;

	ASSERT(cpu != NULL);
	cpi = cpu->cpu_m.mcpu_cpi;
	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
		return (1);
	return (0);
#endif
}
uint32_t
cpuid_get_apicid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
		return (UINT32_MAX);
	}

	return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
}
void
cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
{
	struct cpuid_info *cpi;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	if (pabits)
		*pabits = cpi->cpi_pabits;
	if (vabits)
		*vabits = cpi->cpi_vabits;
}
/*
 * Returns the number of data TLB entries for a corresponding
 * pagesize. If it can't be computed, or isn't known, the
 * routine returns zero. If you ask about an architecturally
 * impossible pagesize, the routine will panic (so that the
 * hat implementor knows that things are inconsistent.)
 */
uint_t
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	/*
	 * Check the L2 TLB info
	 */
	if (cpi->cpi_xmaxeax >= 0x80000006) {
		struct cpuid_regs *cp = &cpi->cpi_extd[6];

		switch (pagesize) {

		case 4 * 1024:
			/*
			 * All zero in the top 16 bits of the register
			 * indicates a unified TLB. Size is in low 16 bits.
			 */
			if ((cp->cp_ebx & 0xffff0000) == 0)
				dtlb_nent = cp->cp_ebx & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
			break;

		case 2 * 1024 * 1024:
			if ((cp->cp_eax & 0xffff0000) == 0)
				dtlb_nent = cp->cp_eax & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_eax, 27, 16);
			break;

		default:
			panic("unknown L2 pagesize");
			/*NOTREACHED*/
		}
	}

	if (dtlb_nent != 0)
		return (dtlb_nent);

	/*
	 * No L2 TLB support for this size, try L1.
	 */
	if (cpi->cpi_xmaxeax >= 0x80000005) {
		struct cpuid_regs *cp = &cpi->cpi_extd[5];

		switch (pagesize) {
		case 4 * 1024:
			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
			break;
		case 2 * 1024 * 1024:
			dtlb_nent = BITX(cp->cp_eax, 23, 16);
			break;
		default:
			panic("unknown L1 d-TLB pagesize");
			/*NOTREACHED*/
		}
	}

	return (dtlb_nent);
}
/*
 * Return 0 if the erratum is not present or not applicable, positive
 * if it is, and negative if the status of the erratum is unknown.
 *
 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
 * Processors" #25759, Rev 3.57, August 2005
 */
int
cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	uint_t eax;

	/*
	 * Bail out if this CPU isn't an AMD CPU, or if it's
	 * a legacy (32-bit) AMD CPU.
	 */
	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
	    cpi->cpi_family == 6)
		return (0);

	eax = cpi->cpi_std[1].cp_eax;
#define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
#define	SH_B3(eax)	(eax == 0xf51)
#define	B(eax)		(SH_B0(eax) || SH_B3(eax))

#define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)

#define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
#define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
#define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
#define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))

#define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
#define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
#define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
#define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))

#define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
#define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
#define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
#define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
#define	BH_E4(eax)	(eax == 0x20fb1)
#define	SH_E5(eax)	(eax == 0x20f42)
#define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
#define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
#define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
			    DH_E6(eax) || JH_E6(eax))

#define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
#define	DR_B0(eax)	(eax == 0x100f20)
#define	DR_B1(eax)	(eax == 0x100f21)
#define	DR_BA(eax)	(eax == 0x100f2a)
#define	DR_B2(eax)	(eax == 0x100f22)
#define	DR_B3(eax)	(eax == 0x100f23)
#define	RB_C0(eax)	(eax == 0x100f40)
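/*
 * Each signature above is a raw cpuid fn-1 %eax value with the
 * extended family/model fields included: e.g. SH_B3's 0xf51 decodes
 * as family 0xf, model 5, stepping 1, while DR_B1's 0x100f21 adds
 * extended family 1 (i.e. family 0x10), model 2, stepping 1.
 */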
	switch (erratum) {
	case 1:
		return (cpi->cpi_family < 0x10);
	case 51:	/* what does the asterisk mean? */
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 57:
		return (cpi->cpi_family <= 0x11);
	case 60:
		return (cpi->cpi_family <= 0x11);
	case 72:
		return (SH_B0(eax));
	case 75:
		return (cpi->cpi_family < 0x10);
	case 77:
		return (cpi->cpi_family <= 0x11);
	case 78:
		return (B(eax) || SH_C0(eax));
	case 79:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 83:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 85:
		return (cpi->cpi_family < 0x10);
	case 86:
		return (SH_C0(eax) || CG(eax));
	case 88:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 89:
		return (cpi->cpi_family < 0x10);
	case 90:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 91:
	case 92:
		return (B(eax) || SH_C0(eax));
	case 93:
		return (SH_C0(eax));
	case 94:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 95:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 96:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 97:
	case 98:
		return (SH_C0(eax) || CG(eax));
	case 99:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 100:
		return (B(eax) || SH_C0(eax));
	case 101:
	case 103:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 104:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 105:
	case 106:
	case 107:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 108:
		return (DH_CG(eax));
	case 109:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 110:
		return (D0(eax) || EX(eax));
	case 112:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 113:
		return (eax == 0x20fc0);
	case 114:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 115:
		return (SH_E0(eax) || JH_E1(eax));
	case 116:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 117:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 118:
		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
		    JH_E6(eax));
	case 121:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 122:
		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
	case 123:
		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
	case 131:
		return (cpi->cpi_family < 0x10);
	case 6336786:
		/*
		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
		 * if this is a K8 family or newer processor
		 */
		if (CPI_FAMILY(cpi) == 0xf) {
			struct cpuid_regs regs;
			regs.cp_eax = 0x80000007;
			(void) __cpuid_insn(&regs);
			return (!(regs.cp_edx & 0x100));
		}
		return (0);
	case 6323525:
		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
	case 6671130:
		/*
		 * check for processors (pre-Shanghai) that do not provide
		 * optimal management of 1gb ptes in its tlb.
		 */
		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
	case 298:
		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
		    DR_B2(eax) || RB_C0(eax));
	case 721:
#if defined(__amd64)
		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
#else
		return (0);
#endif
	default:
		return (-1);
	}
}
/*
 * Determine if specified erratum is present via OSVW (OS Visible Workaround).
 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
 */
static int
osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi;
	uint_t osvwid;
	static int osvwfeature = -1;
	uint64_t osvwlength;

	cpi = cpu->cpu_m.mcpu_cpi;

	/* confirm OSVW supported */
	if (osvwfeature == -1) {
		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
	} else {
		/* assert that osvw feature setting is consistent on all cpus */
		ASSERT(osvwfeature ==
		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
	}
	if (!osvwfeature)
		return (-1);

	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;

	switch (erratum) {
	case 298:	/* osvwid is 0 */
		osvwid = 0;
		if (osvwlength <= (uint64_t)osvwid) {
			/* osvwid 0 is unknown */
			return (-1);
		}

		/*
		 * Check the OSVW STATUS MSR to determine the state
		 * of the erratum where:
		 *   0 - fixed by HW
		 *   1 - BIOS has applied the workaround when BIOS
		 *   workaround is available. (Or for other errata,
		 *   OS workaround is required.)
		 * For a value of 1, caller will confirm that the
		 * erratum 298 workaround has indeed been applied by BIOS.
		 *
		 * A 1 may be set in cpus that have a HW fix
		 * in a mixed cpu system. Regarding erratum 298:
		 *   In a multiprocessor platform, the workaround above
		 *   should be applied to all processors regardless of
		 *   silicon revision when an affected processor is
		 *   present.
		 */

		return (rdmsr(MSR_AMD_OSVW_STATUS +
		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
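		/*
		 * e.g. osvwid 0 tests bit 0 of MSR_AMD_OSVW_STATUS
		 * itself; an osvwid >= OSVW_ID_CNT_PER_MSR would select
		 * a bit in one of the following status MSRs instead.
		 */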
	default:
		return (-1);
	}
}

static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";
static void
add_cache_prop(dev_info_t *devi, const char *label, const char *type,
    uint_t val)
{
	char buf[128];

	/*
	 * ndi_prop_update_int() is used because it is desirable for
	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
	 */
	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
}
/*
 * Intel-style cache/tlb description
 *
 * Standard cpuid level 2 gives a randomly ordered
 * selection of tags that index into a table that describes
 * cache and tlb properties.
 */

static const char l1_icache_str[] = "l1-icache";
static const char l1_dcache_str[] = "l1-dcache";
static const char l2_cache_str[] = "l2-cache";
static const char l3_cache_str[] = "l3-cache";
static const char itlb4k_str[] = "itlb-4K";
static const char dtlb4k_str[] = "dtlb-4K";
static const char itlb2M_str[] = "itlb-2M";
static const char itlb4M_str[] = "itlb-4M";
static const char dtlb4M_str[] = "dtlb-4M";
static const char dtlb24_str[] = "dtlb0-2M-4M";
static const char itlb424_str[] = "itlb-4K-2M-4M";
static const char itlb24_str[] = "itlb-2M-4M";
static const char dtlb44_str[] = "dtlb-4K-4M";
static const char sl1_dcache_str[] = "sectored-l1-dcache";
static const char sl2_cache_str[] = "sectored-l2-cache";
static const char itrace_str[] = "itrace-cache";
static const char sl3_cache_str[] = "sectored-l3-cache";
static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";

static const struct cachetab {
	uint8_t		ct_code;
	uint8_t		ct_assoc;
	uint16_t	ct_line_size;
	size_t		ct_size;
	const char	*ct_label;
} intel_ctab[] = {
	/*
	 * maintain descending order!
	 *
	 * Codes ignored - Reason
	 * ----------------------
	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
	 * f0H/f1H - Currently we do not interpret prefetch size by design
	 */
	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str },
	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str },
	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str },
	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str },
	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str },
	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str },
	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str },
	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str },
	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str },
	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str },
	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str },
	{ 0xd0, 4, 64, 512*1024, l3_cache_str },
	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str },
	{ 0xc0, 4, 0, 8, dtlb44_str },
	{ 0xba, 4, 0, 64, dtlb4k_str },
	{ 0xb4, 4, 0, 256, dtlb4k_str },
	{ 0xb3, 4, 0, 128, dtlb4k_str },
	{ 0xb2, 4, 0, 64, itlb4k_str },
	{ 0xb0, 4, 0, 128, itlb4k_str },
	{ 0x87, 8, 64, 1024*1024, l2_cache_str },
	{ 0x86, 4, 64, 512*1024, l2_cache_str },
	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str },
	{ 0x84, 8, 32, 1024*1024, l2_cache_str },
	{ 0x83, 8, 32, 512*1024, l2_cache_str },
	{ 0x82, 8, 32, 256*1024, l2_cache_str },
	{ 0x80, 8, 64, 512*1024, l2_cache_str },
	{ 0x7f, 2, 64, 512*1024, l2_cache_str },
	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str },
	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str },
	{ 0x7b, 8, 64, 512*1024, sl2_cache_str },
	{ 0x7a, 8, 64, 256*1024, sl2_cache_str },
	{ 0x79, 8, 64, 128*1024, sl2_cache_str },
	{ 0x78, 8, 64, 1024*1024, l2_cache_str },
	{ 0x73, 8, 0, 64*1024, itrace_str },
	{ 0x72, 8, 0, 32*1024, itrace_str },
	{ 0x71, 8, 0, 16*1024, itrace_str },
	{ 0x70, 8, 0, 12*1024, itrace_str },
	{ 0x68, 4, 64, 32*1024, sl1_dcache_str },
	{ 0x67, 4, 64, 16*1024, sl1_dcache_str },
	{ 0x66, 4, 64, 8*1024, sl1_dcache_str },
	{ 0x60, 8, 64, 16*1024, sl1_dcache_str },
	{ 0x5d, 0, 0, 256, dtlb44_str },
	{ 0x5c, 0, 0, 128, dtlb44_str },
	{ 0x5b, 0, 0, 64, dtlb44_str },
	{ 0x5a, 4, 0, 32, dtlb24_str },
	{ 0x59, 0, 0, 16, dtlb4k_str },
	{ 0x57, 4, 0, 16, dtlb4k_str },
	{ 0x56, 4, 0, 16, dtlb4M_str },
	{ 0x55, 0, 0, 7, itlb24_str },
	{ 0x52, 0, 0, 256, itlb424_str },
	{ 0x51, 0, 0, 128, itlb424_str },
	{ 0x50, 0, 0, 64, itlb424_str },
	{ 0x4f, 0, 0, 32, itlb4k_str },
	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str },
	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str },
	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str },
	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str },
	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str },
	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str },
	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str },
	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str },
	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str },
	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str },
	{ 0x44, 4, 32, 1024*1024, l2_cache_str },
	{ 0x43, 4, 32, 512*1024, l2_cache_str },
	{ 0x42, 4, 32, 256*1024, l2_cache_str },
	{ 0x41, 4, 32, 128*1024, l2_cache_str },
	{ 0x3e, 4, 64, 512*1024, sl2_cache_str },
	{ 0x3d, 6, 64, 384*1024, sl2_cache_str },
	{ 0x3c, 4, 64, 256*1024, sl2_cache_str },
	{ 0x3b, 2, 64, 128*1024, sl2_cache_str },
	{ 0x3a, 6, 64, 192*1024, sl2_cache_str },
	{ 0x39, 4, 64, 128*1024, sl2_cache_str },
	{ 0x30, 8, 64, 32*1024, l1_icache_str },
	{ 0x2c, 8, 64, 32*1024, l1_dcache_str },
	{ 0x29, 8, 64, 4096*1024, sl3_cache_str },
	{ 0x25, 8, 64, 2048*1024, sl3_cache_str },
	{ 0x23, 8, 64, 1024*1024, sl3_cache_str },
	{ 0x22, 4, 64, 512*1024, sl3_cache_str },
	{ 0x0e, 6, 64, 24*1024, l1_dcache_str },
	{ 0x0d, 4, 32, 16*1024, l1_dcache_str },
	{ 0x0c, 4, 32, 16*1024, l1_dcache_str },
	{ 0x0b, 4, 0, 4, itlb4M_str },
	{ 0x0a, 2, 32, 8*1024, l1_dcache_str },
	{ 0x08, 4, 32, 16*1024, l1_icache_str },
	{ 0x06, 4, 32, 8*1024, l1_icache_str },
	{ 0x05, 4, 0, 32, dtlb4M_str },
	{ 0x04, 4, 0, 8, dtlb4M_str },
	{ 0x03, 4, 0, 64, dtlb4k_str },
	{ 0x02, 4, 0, 2, itlb4M_str },
	{ 0x01, 4, 0, 32, itlb4k_str },
	{ 0 }
};
static const struct cachetab cyrix_ctab[] = {
	{ 0x70, 4, 0, 32, "tlb-4K" },
	{ 0x80, 4, 16, 16*1024, "l1-cache" },
	{ 0 }
};
/*
 * Search a cache table for a matching entry
 */
static const struct cachetab *
find_cacheent(const struct cachetab *ct, uint_t code)
{
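	/*
	 * The tables above are kept in descending ct_code order, so
	 * the scan below can stop at the first entry <= the search
	 * code and declare a hit only on exact equality.
	 */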
	if (code != 0) {
		for (; ct->ct_code != 0; ct++)
			if (ct->ct_code <= code)
				break;
		if (ct->ct_code == code)
			return (ct);
	}
	return (NULL);
}
/*
 * Populate cachetab entry with L2 or L3 cache-information using
 * cpuid function 4. This function is called from intel_walk_cacheinfo()
 * when descriptor 0x49 is encountered. It returns 0 if no such cache
 * information is found.
 */
static int
intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
{
	uint32_t level, i;
	int ret = 0;

	for (i = 0; i < cpi->cpi_std_4_size; i++) {
		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);

		if (level == 2 || level == 3) {
			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
			ct->ct_line_size =
			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
			ct->ct_size = ct->ct_assoc *
			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
			    ct->ct_line_size *
			    (cpi->cpi_std_4[i]->cp_ecx + 1);
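			/*
			 * i.e. size = ways x partitions x line size x
			 * sets; e.g. 8 ways x 1 partition x 64-byte
			 * lines x 4096 sets gives a 2MB cache.
			 */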
			if (level == 2) {
				ct->ct_label = l2_cache_str;
			} else if (level == 3) {
				ct->ct_label = l3_cache_str;
			}
			ret = 1;
		}
	}

	return (ret);
}
/*
 * Walk the cacheinfo descriptor, applying 'func' to every valid element
 * The walk is terminated if the walker returns non-zero.
 */
static void
intel_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	struct cachetab des_49_ct, des_b1_ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * For overloaded descriptor 0x49 we use cpuid function 4
		 * if supported by the current processor, to create
		 * cache information.
		 * For overloaded descriptor 0xb1 we use X86_PAE flag
		 * to disambiguate the cache information.
		 */
		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
			ct = &des_49_ct;
		} else if (*dp == 0xb1) {
			des_b1_ct.ct_code = 0xb1;
			des_b1_ct.ct_assoc = 4;
			des_b1_ct.ct_line_size = 0;
			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
				des_b1_ct.ct_size = 8;
				des_b1_ct.ct_label = itlb2M_str;
			} else {
				des_b1_ct.ct_size = 4;
				des_b1_ct.ct_label = itlb4M_str;
			}
			ct = &des_b1_ct;
		} else {
			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
				continue;
			}
		}

		if (func(arg, ct) != 0) {
			break;
		}
	}
}
/*
 * (Like the Intel one, except for Cyrix CPUs)
 */
static void
cyrix_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * Search Cyrix-specific descriptor table first ..
		 */
		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}

		/*
		 * .. else fall back to the Intel one
		 */
		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}
	}
}
/*
 * A cacheinfo walker that adds associativity, line-size, and size properties
 * to the devinfo node it is passed as an argument.
 */
static int
add_cacheent_props(void *arg, const struct cachetab *ct)
{
	dev_info_t *devi = arg;

	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
	if (ct->ct_line_size != 0)
		add_cache_prop(devi, ct->ct_label, line_str,
		    ct->ct_line_size);
	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
	return (0);
}
static const char fully_assoc[] = "fully-associative?";

/*
 * AMD style cache/tlb description
 *
 * Extended functions 5 and 6 directly describe properties of
 * tlbs and various cache levels.
 */
static void
add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* reserved; ignore */
		break;
	default:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 0xff:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	}
}
static void
add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0)
		return;
	add_cache_prop(devi, label, size_str, size);
	add_amd_assoc(devi, label, assoc);
}
static void
add_amd_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || line_size == 0)
		return;
	add_amd_assoc(devi, label, assoc);
	/*
	 * Most AMD parts have a sectored cache. Multiple cache lines are
	 * associated with each tag. A sector consists of all cache lines
	 * associated with a tag. For example, the AMD K6-III has a sector
	 * size of 2 cache lines per tag.
	 */
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}
static void
add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* off */
		break;
	case 1:
	case 2:
	case 4:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 6:
		add_cache_prop(devi, label, assoc_str, 8);
		break;
	case 8:
		add_cache_prop(devi, label, assoc_str, 16);
		break;
	case 0xf:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	default:	/* reserved; ignore */
		break;
	}
}
static void
add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0 || assoc == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	add_cache_prop(devi, label, size_str, size);
}
static void
add_amd_l2_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || assoc == 0 || line_size == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}
static void
amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
{
	struct cpuid_regs *cp;

	if (cpi->cpi_xmaxeax < 0x80000005)
		return;
	cp = &cpi->cpi_extd[5];

	/*
	 * 4M/2M L1 TLB configuration
	 *
	 * We report the size for 2M pages because AMD uses two
	 * TLB entries for one 4M page.
	 */
	add_amd_tlb(devi, "dtlb-2M",
	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
	add_amd_tlb(devi, "itlb-2M",
	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));

	/*
	 * 4K L1 TLB configuration
	 */

	switch (cpi->cpi_vendor) {
		uint_t nentries;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5) {
			/*
			 * Crusoe processors have 256 TLB entries, but
			 * cpuid data format constrains them to only
			 * reporting 255 of them.
			 */
			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
				nentries = 256;
			/*
			 * Crusoe processors also have a unified TLB
			 */
			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
			    nentries);
			break;
		}
		/*FALLTHROUGH*/
	default:
		add_amd_tlb(devi, itlb4k_str,
		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
		add_amd_tlb(devi, dtlb4k_str,
		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
		break;
	}

	/*
	 * data L1 cache configuration
	 */

	add_amd_cache(devi, l1_dcache_str,
	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));

	/*
	 * code L1 cache configuration
	 */

	add_amd_cache(devi, l1_icache_str,
	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	/* Check for a unified L2 TLB for large pages */

	if (BITX(cp->cp_eax, 31, 16) == 0)
		add_amd_l2_tlb(devi, "l2-tlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	else {
		add_amd_l2_tlb(devi, "l2-dtlb-2M",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	/* Check for a unified L2 TLB for 4K pages */

	if (BITX(cp->cp_ebx, 31, 16) == 0) {
		add_amd_l2_tlb(devi, "l2-tlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	} else {
		add_amd_l2_tlb(devi, "l2-dtlb-4K",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	add_amd_l2_cache(devi, l2_cache_str,
	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
}
/*
 * There are two basic ways that the x86 world describes its cache
 * and tlb architecture - Intel's way and AMD's way.
 *
 * Return which flavor of cache architecture we should use
 */
static int
x86_which_cacheinfo(struct cpuid_info *cpi)
{
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	case X86_VENDOR_AMD:
		/*
		 * The K5 model 1 was the first part from AMD that reported
		 * cache sizes via extended cpuid functions.
		 */
		if (cpi->cpi_family > 5 ||
		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
			return (X86_VENDOR_AMD);
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5)
			return (X86_VENDOR_AMD);
		/*FALLTHROUGH*/
	default:
		/*
		 * If they have extended CPU data for 0x80000005
		 * then we assume they have AMD-format cache
		 * and tlb information.
		 *
		 * If not, and the vendor happens to be Cyrix,
		 * then try our-Cyrix specific handler.
		 *
		 * If we're not Cyrix, then assume we're using Intel's
		 * table-driven format instead.
		 */
		if (cpi->cpi_xmaxeax >= 0x80000005)
			return (X86_VENDOR_AMD);
		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
			return (X86_VENDOR_Cyrix);
		else if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	}
	return (-1);
}
void
cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
    struct cpuid_info *cpi)
{
	dev_info_t *cpu_devi;
	int create;

	cpu_devi = (dev_info_t *)dip;

	/* device_type */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "device_type", "cpu");

	/* reg */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "reg", cpu_id);

	/* cpu-mhz, and clock-frequency */
	if (cpu_freq > 0) {
		long long mul;

		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpu-mhz", cpu_freq);
		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clock-frequency", (int)mul);
	}

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
		return;
	}

	/* vendor-id */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "vendor-id", cpi->cpi_vendorstr);

	if (cpi->cpi_maxeax == 0) {
		return;
	}

	/*
	 * family, model, and step
	 */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "family", CPI_FAMILY(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpu-model", CPI_MODEL(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "stepping-id", CPI_STEP(cpi));

	/* type */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = 1;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "type", CPI_TYPE(cpi));

	/* ext-family */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-family", CPI_FAMILY_XTD(cpi));

	/* ext-model */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_EXTENDED_MODEL_INTEL(cpi);
		break;
	case X86_VENDOR_AMD:
		create = CPI_FAMILY(cpi) == 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-model", CPI_MODEL_XTD(cpi));

	/* generation */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD K5 model 1 was the first part to support this
		 */
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));

	/* brand-id */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * brand id first appeared on Pentium III Xeon model 8,
		 * and Celeron model 8 processors and Opteron
		 */
		create = cpi->cpi_family > 6 ||
		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create && cpi->cpi_brandid != 0) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "brand-id", cpi->cpi_brandid);
	}

	/* chunks, and apic-id */
	switch (cpi->cpi_vendor) {
		/*
		 * first available on Pentium IV and Opteron (K8)
		 */
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "chunks", CPI_CHUNKS(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "apic-id", cpi->cpi_apicid);
		if (cpi->cpi_chipid >= 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "chip#", cpi->cpi_chipid);
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clog#", cpi->cpi_clogid);
		}
	}

	/* cpuid-features */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpuid-features", CPI_FEATURES_EDX(cpi));


	/* cpuid-features-ecx */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));

	/* ext-cpuid-features */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
	case X86_VENDOR_Cyrix:
	case X86_VENDOR_TM:
	case X86_VENDOR_Centaur:
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
	}

	/*
	 * Brand String first appeared in Intel Pentium IV, AMD K5
	 * model 1, and Cyrix GXm. On earlier models we try and
	 * simulate something similar .. so this string should always
	 * say -something- about the processor, however lame.
	 */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "brand-string", cpi->cpi_brandstr);

	/*
	 * Finally, cache and tlb information
	 */
	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_AMD:
		amd_cache_info(cpi, cpu_devi);
		break;
	default:
		break;
	}
}
/*
 * A cacheinfo walker that fetches the size, line-size and associativity
 * of the L2 cache
 */
static int
intel_l2cinfo(void *arg, const struct cachetab *ct)
{
	struct l2info *l2i = arg;
	int *ip;

	if (ct->ct_label != l2_cache_str &&
	    ct->ct_label != sl2_cache_str)
		return (0);	/* not an L2 -- keep walking */

	if ((ip = l2i->l2i_csz) != NULL)
		*ip = ct->ct_size;
	if ((ip = l2i->l2i_lsz) != NULL)
		*ip = ct->ct_line_size;
	if ((ip = l2i->l2i_assoc) != NULL)
		*ip = ct->ct_assoc;
	l2i->l2i_ret = ct->ct_size;
	return (1);	/* was an L2 -- terminate walk */
}
/*
 * AMD L2/L3 Cache and TLB Associativity Field Definition:
 *
 *	Unlike the associativity for the L1 cache and tlb where the 8 bit
 *	value is the associativity, the associativity for the L2 cache and
 *	tlb is encoded in the following table. The 4 bit L2 value serves as
 *	an index into the amd_afd[] array to determine the associativity.
 *	-1 is undefined. 0 is fully associative.
 */

static int amd_afd[] =
	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
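/*
 * e.g. an encoded field of 0x6 yields amd_afd[6] == 8 (8-way) and 0xf
 * yields 0 (fully associative); this mirrors the encoding handled by
 * add_amd_l2_assoc() above.
 */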
static void
amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
{
	struct cpuid_regs *cp;
	uint_t size, assoc;
	int i;
	int *ip;

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
		uint_t cachesz = size * 1024;
		assoc = amd_afd[i];

		ASSERT(assoc != -1);

		if ((ip = l2i->l2i_csz) != NULL)
			*ip = cachesz;
		if ((ip = l2i->l2i_lsz) != NULL)
			*ip = BITX(cp->cp_ecx, 7, 0);
		if ((ip = l2i->l2i_assoc) != NULL)
			*ip = assoc;
		l2i->l2i_ret = cachesz;
	}
}
int
getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct l2info __l2info, *l2i = &__l2info;

	l2i->l2i_csz = csz;
	l2i->l2i_lsz = lsz;
	l2i->l2i_assoc = assoc;
	l2i->l2i_ret = -1;

	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_AMD:
		amd_l2cacheinfo(cpi, l2i);
		break;
	default:
		break;
	}
	return (l2i->l2i_ret);
}
uint32_t *
cpuid_mwait_alloc(cpu_t *cpu)
{
	uint32_t *ret;
	size_t mwait_size;

	ASSERT(cpuid_checkpass(CPU, 2));

	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
	if (mwait_size == 0)
		return (NULL);

	/*
	 * kmem_alloc() returns cache line size aligned data for mwait_size
	 * allocations. mwait_size is currently cache line sized. Neither
	 * of these implementation details are guaranteed to be true in the
	 * future.
	 *
	 * First try allocating mwait_size as kmem_alloc() currently returns
	 * correctly aligned memory. If kmem_alloc() does not return
	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
	 *
	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
	 * decide to free this memory.
	 */
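	/*
	 * e.g. with a 64-byte monitor line size: if the first 64-byte
	 * allocation comes back 64-byte aligned it is used as-is;
	 * otherwise a 128-byte buffer is allocated and P2ROUNDUP picks
	 * the first 64-byte boundary within it.
	 */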
	ret = kmem_zalloc(mwait_size, KM_SLEEP);
	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
		*ret = MWAIT_RUNNING;
		return (ret);
	} else {
		kmem_free(ret, mwait_size);
		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
		*ret = MWAIT_RUNNING;
		return (ret);
	}
}
void
cpuid_mwait_free(cpu_t *cpu)
{
	if (cpu->cpu_m.mcpu_cpi == NULL) {
		return;
	}

	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
}
void
patch_tsc_read(int flag)
{
	size_t cnt;

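	/*
	 * Note: each case below copies an alternative implementation,
	 * bracketed by _xxx_start/_xxx_end linker labels, over the body
	 * of tsc_read(); e.g. the lfence variant serializes rdtsc on
	 * processors where it could otherwise be reordered.
	 */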
	switch (flag) {
	case X86_NO_TSC:
		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
		break;
	case X86_HAVE_TSCP:
		cnt = &_tscp_end - &_tscp_start;
		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
		break;
	case X86_TSC_MFENCE:
		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_mfence_start, cnt);
		break;
	case X86_TSC_LFENCE:
		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_lfence_start, cnt);
		break;
	default:
		break;
	}
}
int
cpuid_deep_cstates_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));

	cpi = CPU->cpu_m.mcpu_cpi;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
		return (0);

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_xmaxeax < 0x80000007)
			return (0);

		/*
		 * TSC run at a constant rate in all ACPI C-states?
		 */
		regs.cp_eax = 0x80000007;
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);

	default:
		return (0);
	}
}
void
post_startup_cpu_fixups(void)
{
	/*
	 * Some AMD processors support C1E state. Entering this state will
	 * cause the local APIC timer to stop, which we can't deal with at
	 * this time.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
}
/*
 * Setup necessary registers to enable XSAVE feature on this processor.
 * This function needs to be called early enough, so that no xsave/xrstor
 * ops will execute on the processor before the MSRs are properly set up.
 *
 * Current implementation has the following assumption:
 * - cpuid_pass1() is done, so that X86 features are known.
 * - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);
	/*
	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
	 * the feature.
	 */
	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
}
/*
 * Starting with the Westmere processor the local
 * APIC timer will continue running in all C-states,
 * including the deepest C-states.
 */
int
cpuid_arat_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	cpi = CPU->cpu_m.mcpu_cpi;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * Always-running Local APIC Timer is
		 * indicated by CPUID.6.EAX[2].
		 */
		if (cpi->cpi_maxeax >= 6) {
			regs.cp_eax = 6;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_eax & CPUID_CSTATE_ARAT);
		}
		return (0);
	default:
		return (0);
	}
}
/*
 * Check support for Intel ENERGY_PERF_BIAS feature
 */
int
cpuid_iepb_supported(struct cpu *cp)
{
	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(cp, 1));

	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
		return (0);
	}

	/*
	 * Intel ENERGY_PERF_BIAS MSR is indicated by
	 * capability bit CPUID.6.ECX.3
	 */
	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
		return (0);

	regs.cp_eax = 0x6;
	(void) cpuid_insn(NULL, &regs);
	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
}
/*
 * Check support for TSC deadline timer
 *
 * TSC deadline timer provides a superior software programming
 * model over local APIC timer that eliminates "time drifts".
 * Instead of specifying a relative time, software specifies an
 * absolute time as the target at which the processor should
 * generate a timer event.
 */
int
cpuid_deadline_tsc_supported(void)
{
	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 1) {
			regs.cp_eax = 1;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
		}
		return (0);
	default:
		return (0);
	}
}
#if defined(__amd64) && !defined(__xpv)
/*
 * Patch in versions of bcopy for high performance Intel Nhm processors
 * and later...
 */
void
patch_memops(uint_t vendor)
{
	size_t cnt, i;
	caddr_t to, from;

	if ((vendor == X86_VENDOR_Intel) &&
	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
		cnt = &bcopy_patch_end - &bcopy_patch_start;
		to = &bcopy_ck_size;
		from = &bcopy_patch_start;
		for (i = 0; i < cnt; i++) {
			*to++ = *from++;
		}
	}
}
#endif	/* __amd64 && !__xpv */
/*
 * This function finds the number of bits to represent the number of cores per
 * chip and the number of strands per core for the Intel platforms.
 * It re-uses the x2APIC cpuid code of cpuid_pass2().
 */
void
cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
{
	struct cpuid_regs regs;
	struct cpuid_regs *cp = &regs;

	if (vendor != X86_VENDOR_Intel) {
		return;
	}

	/* if the cpuid level is 0xB, extended topo is available. */
	cp->cp_eax = 0;
	if (__cpuid_insn(cp) >= 0xB) {

		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
		(void) __cpuid_insn(cp);

		/*
		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint_t coreid_shift = 0;
			uint_t chipid_shift = 0;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					/*
					 * Thread level processor topology
					 * Number of bits shift right APIC ID
					 * to get the coreid.
					 */
					coreid_shift = BITX(cp->cp_eax, 4, 0);
				} else if (level == 2) {
					/*
					 * Core level processor topology
					 * Number of bits shift right APIC ID
					 * to get the chipid.
					 */
					chipid_shift = BITX(cp->cp_eax, 4, 0);
				}
			}

			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
				*strand_nbits = coreid_shift;
				*core_nbits = chipid_shift - coreid_shift;
			}
		}
	}
}
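/*
 * e.g. a part reporting coreid_shift == 1 and chipid_shift == 4 ends up
 * with 1 strand bit and 3 core bits: the APIC id then encodes up to
 * 2 strands per core and up to 8 cores per chip.
 */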
;