/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Portions Copyright 2009 Advanced Micro Devices, Inc.
 */
/*
 * Copyright 2018 Joyent, Inc.
 */
/*
 * Various routines to handle identification
 * and classification of x86 processors.
 */
#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpuvar.h>
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <sys/controlregs.h>
#include <sys/bitmap.h>
#include <sys/auxv_386.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/comm_page.h>
#include <sys/mach_mmu.h>
#include <sys/hypervisor.h>
#include <sys/ontrap.h>
/*
 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
 * them accordingly. For most modern processors, feature detection occurs here
 * in pass 1.
 *
 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
 * for the boot CPU and does the basic analysis that the early kernel needs.
 * x86_featureset is set based on the return value of cpuid_pass1() of the boot
 * CPU. Pass 1 includes:
 *
 *	o Determining vendor/model/family/stepping and setting x86_type and
 *	  x86_vendor accordingly.
 *	o Processing the feature flags returned by the cpuid instruction while
 *	  applying any workarounds or tricks for the specific processor.
 *	o Mapping the feature flags into illumos feature bits (X86_*).
 *	o Processing extended feature flags if supported by the processor,
 *	  again while applying specific processor knowledge.
 *	o Determining the CMT characteristics of the system.
 *
 * Pass 1 is done on non-boot CPUs during their initialization and the results
 * are used only as a meager attempt at ensuring that all processors within the
 * system support the same features.
 *
 * Pass 2 of cpuid feature analysis happens just at the beginning
 * of startup(). It just copies in and corrects the remainder
 * of the cpuid data we depend on: standard cpuid functions that we didn't
 * need for pass1 feature analysis, and extended cpuid functions beyond the
 * simple feature processing done in pass1.
 *
 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
 * particular kernel memory allocation has been made available. It creates a
 * readable brand string based on the data collected in the first two passes.
 *
 * Pass 4 of cpuid analysis is invoked after post_startup() when all
 * the support infrastructure for various hardware features has been
 * initialized. It determines which processor features will be reported
 * to userland via the aux vector.
 *
 * All passes are executed on all CPUs, but only the boot CPU determines what
 * features the kernel will use.
 *
 * Much of the worst junk in this file is for the support of processors
 * that didn't really implement the cpuid instruction properly.
 *
 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 * the pass numbers. Accordingly, changes to the pass code may require changes
 * to the accessor code.
 */
uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;

#if defined(__xpv)
int x86_use_pcid = 0;
int x86_use_invpcid = 0;
#else
int x86_use_pcid = -1;
int x86_use_invpcid = -1;
#endif

uint_t pentiumpro_bug4046376;

uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];

static char *x86_feature_names[NUM_X86_FEATURES] = {
	/* feature name strings, one per X86FSET_* bit */
};
boolean_t
is_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	return (BT_TEST((ulong_t *)featureset, feature));
}

void
add_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_SET((ulong_t *)featureset, feature);
}

void
remove_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_CLEAR((ulong_t *)featureset, feature);
}

boolean_t
compare_x86_featureset(void *setA, void *setB)
{
	/*
	 * We assume that the unused bits of the bitmap are always zero.
	 */
	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

void
print_x86_featureset(void *featureset)
{
	uint_t i;

	for (i = 0; i < NUM_X86_FEATURES; i++) {
		if (is_x86_feature(featureset, i)) {
			cmn_err(CE_CONT, "?x86_feature: %s\n",
			    x86_feature_names[i]);
		}
	}
}
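
/*
 * Usage sketch (illustrative only, not part of the original file): building a
 * local feature bitmap and testing a bit with the helpers above. The feature
 * chosen (X86FSET_SSE2) is arbitrary.
 */
static void
featureset_example(void)
{
	uchar_t fs[BT_SIZEOFMAP(NUM_X86_FEATURES)] = { 0 };

	add_x86_feature(fs, X86FSET_SSE2);
	if (is_x86_feature(fs, X86FSET_SSE2))
		cmn_err(CE_CONT, "?featureset_example: SSE2 bit set\n");
}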
/* Note: This is the maximum size for the CPU, not the size of the structure. */
static size_t xsave_state_size = 0;
uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
boolean_t xsave_force_disable = B_FALSE;
extern int disable_smap;
/*
 * This is set to the platform type we are running on.
 */
static int platform_type = -1;

/*
 * Variable to patch if hypervisor platform detection needs to be
 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
 */
int enable_platform_detection = 1;
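
/*
 * Usage note (illustrative, an assumption rather than documented practice):
 * since this is a plain global int, it can be patched to 0, e.g. from kmdb
 * or via /etc/system, to force platform_type to HW_NATIVE.
 */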
/*
 * monitor/mwait info.
 *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment.  buf_actual and size_actual should be passed
 * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
 */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};
/*
 * xsave/xrestor info.
 *
 * This structure contains HW feature bits and the size of the xsave save area.
 * Note: the kernel declares a fixed size (AVX_XSAVE_SIZE) structure
 * (xsave_state) to describe the xsave layout. However, at runtime the
 * per-lwp xsave area is dynamically allocated based on xsav_max_size. The
 * xsave_state structure simply represents the legacy layout of the beginning
 * of the xsave area.
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;	/* Supported HW features */
	uint32_t	xsav_hw_features_high;	/* Supported HW features */
	size_t		xsav_max_size;	/* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
	size_t		bndregs_size;	/* MPX: size of bndregs save area */
	size_t		bndregs_offset;	/* MPX: offset for bndregs save area */
	size_t		bndcsr_size;	/* MPX: size of bndcsr save area */
	size_t		bndcsr_offset;	/* MPX: offset for bndcsr save area */
	size_t		opmask_size;	/* AVX512: size of opmask save */
	size_t		opmask_offset;	/* AVX512: offset for opmask save */
	size_t		zmmlo_size;	/* AVX512: size of zmm 256 save */
	size_t		zmmlo_offset;	/* AVX512: offset for zmm 256 save */
	size_t		zmmhi_size;	/* AVX512: size of zmm hi reg save */
	size_t		zmmhi_offset;	/* AVX512: offset for zmm hi reg save */
};
/*
 * These constants determine how many of the elements of the
 * cpuid we cache in the cpuid_info data structure; the
 * remaining elements are accessible via the cpuid instruction.
 */

#define	NMAX_CPI_STD	8		/* eax = 0 .. 7 */
#define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
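
/*
 * Illustrative note (not original text): 0x1f extended entries cover leaves
 * 0x80000000 through 0x8000001e inclusive, since
 * 0x8000001e - 0x80000000 + 1 == 0x1f.
 */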
/*
 * Some terminology needs to be explained:
 *  - Socket: Something that can be plugged into a motherboard.
 *  - Package: Same as socket
 *  - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
 *    differently: there, chip is the same as processor node (below)
 *  - Processor node: Some AMD processors have more than one
 *    "subprocessor" embedded in a package. These subprocessors (nodes)
 *    are fully-functional processors themselves with cores, caches,
 *    memory controllers, PCI configuration spaces. They are connected
 *    inside the package with Hypertransport links. On single-node
 *    processors, processor node is equivalent to chip/socket/package.
 *  - Compute Unit: Some AMD processors pair cores in "compute units" that
 *    share the FPU and the I$ and L2 caches.
 */
struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
					/*		AMD: package/socket # */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 7 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
	uint8_t cpi_vabits;		/* fn 0x80000006: %eax */
	uint8_t cpi_fp_amd_save;	/* AMD: FP error pointer save rqd. */
	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */

	id_t cpi_coreid;		/* same coreid => strands share core */
	int cpi_pkgcoreid;		/* core number within single package */
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[6];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3
#define	AMD_ECX_FEATURES	4
#define	STD_EBX_FEATURES	5
	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */

	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
	uint_t cpi_apicid;
	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */

	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
};

static struct cpuid_info cpuid_info0;
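
/*
 * Illustrative sketch (not part of the original file): an example accessor in
 * the style of the cpuid_get*() routines mentioned above. The function name is
 * hypothetical; the cpi_pass/cpi_model fields and the "check the pass before
 * reading" idiom are taken from this file.
 */
static uint_t
cpuid_getmodel_example(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass >= 1);	/* data filled in by cpuid_pass1() */
	return (cpi->cpi_model);
}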
/*
 * These bit fields are defined by the Intel Application Note AP-485
 * "Intel Processor Identification and the CPUID Instruction"
 */
#define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
#define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
#define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
#define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
#define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
#define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)

#define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
#define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
#define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
#define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
#define	CPI_FEATURES_7_0_EBX(cpi)	((cpi)->cpi_std[7].cp_ebx)
#define	CPI_FEATURES_7_0_ECX(cpi)	((cpi)->cpi_std[7].cp_ecx)
#define	CPI_FEATURES_7_0_EDX(cpi)	((cpi)->cpi_std[7].cp_edx)

#define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
#define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
#define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
#define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)

#define	CPI_MAXEAX_MAX		0x100		/* sanity control */
#define	CPI_XMAXEAX_MAX		0x80000100
#define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
#define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
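
/*
 * Illustrative decode (not original code): if leaf 1 returns
 * cp_ebx == 0x02040800, then CPI_BRANDID() is 0, BITX(cp_ebx, 15, 8) is 8
 * (i.e. a CLFLUSH line size of 8 * 8 = 64 bytes), CPI_CPU_COUNT() is 4 and
 * CPI_APIC_ID() is 2.
 */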
/*
 * Function 4 (Deterministic Cache Parameters) macros
 * Defined by Intel Application Note AP-485
 */
#define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
#define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
#define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
#define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
#define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
#define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
#define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)

#define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
#define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
#define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)

#define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)

#define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
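
/*
 * Illustrative helper (not part of the original file): the conventional
 * function 4 cache-size computation built from the macros above. Each field
 * is reported by cpuid as (value - 1), hence the "+ 1" terms.
 */
static size_t
cpi_fn4_cache_size_example(const struct cpuid_regs *regs)
{
	return ((size_t)(CPI_CACHE_WAYS(regs) + 1) *
	    (CPI_CACHE_PARTS(regs) + 1) *
	    (CPI_CACHE_COH_LN_SZ(regs) + 1) *
	    (CPI_CACHE_SETS(regs) + 1));
}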
/*
 * A couple of shorthand macros to identify "later" P6-family chips
 * like the Pentium M and Core. First, the "older" P6-based stuff
 * (loosely defined as "pre-Pentium-4"):
 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
 */
#define	IS_LEGACY_P6(cpi) (			\
	cpi->cpi_family == 6 &&			\
		(cpi->cpi_model == 1 ||		\
		cpi->cpi_model == 3 ||		\
		cpi->cpi_model == 5 ||		\
		cpi->cpi_model == 6 ||		\
		cpi->cpi_model == 7 ||		\
		cpi->cpi_model == 8 ||		\
		cpi->cpi_model == 0xA ||	\
		cpi->cpi_model == 0xB)		\
)

/* A "new F6" is everything with family 6 that's not the above */
#define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))

/* Extended family/model support */
#define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
	cpi->cpi_family >= 0xf)
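
/*
 * Worked example (illustrative): a Skylake client part reports family 0x6,
 * base model 0xE and extended model 0x5 in leaf 1 %eax, so
 * IS_EXTENDED_MODEL_INTEL() is true and the effective model computed in
 * pass 1 becomes 0xE + (0x5 << 4) == 0x5E.
 */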
/*
 * Info for monitor/mwait idle loop.
 *
 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
 * 2006.
 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
 * Documentation Updates" #33633, Rev 2.05, December 2006.
 */
#define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
#define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
#define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
#define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
#define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
#define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
#define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)

/*
 * Number of sub-cstates for a given c-state.
 */
#define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
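
/*
 * Illustrative helper (hypothetical, not part of the original file): counting
 * the MWAIT sub-C-states advertised for C1 using the macros above. Leaf 5
 * %edx packs one 4-bit count per C-state, so C1 lives in bits 7:4.
 */
static uint_t
mwait_c1_substates_example(struct cpuid_info *cpi)
{
	if (!MWAIT_SUPPORTED(cpi))
		return (0);
	return (MWAIT_NUM_SUBC_STATES(cpi, 4));
}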
/*
 * XSAVE leaf 0xD enumeration
 */
#define	CPUID_LEAFD_2_YMM_OFFSET	576
#define	CPUID_LEAFD_2_YMM_SIZE		256
/*
 * Functions we consume from cpuid_subr.c; don't publish these in a header
 * file to try and keep people using the expected cpuid_* interfaces.
 */
extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
/*
 * Apply various platform-dependent restrictions where the
 * underlying platform restrictions mean the CPU can be marked
 * as less capable than its cpuid instruction would imply.
 */
538 platform_cpuid_mangle(uint_t vendor
, uint32_t eax
, struct cpuid_regs
*cp
)
542 uint32_t mcamask
= DOMAIN_IS_INITDOMAIN(xen_info
) ?
543 0 : CPUID_INTC_EDX_MCA
;
547 CPUID_INTC_EDX_VME
| CPUID_INTC_EDX_DE
|
548 CPUID_INTC_EDX_SEP
| CPUID_INTC_EDX_MTRR
|
549 CPUID_INTC_EDX_PGE
| CPUID_INTC_EDX_PAT
|
550 CPUID_AMD_EDX_SYSC
| CPUID_INTC_EDX_SEP
|
551 CPUID_INTC_EDX_PSE36
| CPUID_INTC_EDX_HTT
);
557 ~(CPUID_AMD_EDX_PSE
|
558 CPUID_INTC_EDX_VME
| CPUID_INTC_EDX_DE
|
559 CPUID_AMD_EDX_MTRR
| CPUID_AMD_EDX_PGE
|
560 CPUID_AMD_EDX_PAT
| CPUID_AMD_EDX_PSE36
|
561 CPUID_AMD_EDX_SYSC
| CPUID_INTC_EDX_SEP
|
563 cp
->cp_ecx
&= ~CPUID_AMD_ECX_CMP_LGCY
;
570 case X86_VENDOR_Intel
:
574 * Zero out the (ncores-per-chip - 1) field
576 cp
->cp_eax
&= 0x03fffffff;
586 cp
->cp_ecx
&= ~CPUID_AMD_ECX_CR8D
;
591 * Zero out the (ncores-per-chip - 1) field
593 cp
->cp_ecx
&= 0xffffff00;
604 #define platform_cpuid_mangle(vendor, eax, cp) /* nothing */
/*
 * Some undocumented ways of patching the results of the cpuid
 * instruction to permit running Solaris 10 on future cpus that
 * we don't currently support. Could be set to non-zero values
 * via settings in eeprom.
 */
uint32_t cpuid_feature_ecx_include;
uint32_t cpuid_feature_ecx_exclude;
uint32_t cpuid_feature_edx_include;
uint32_t cpuid_feature_edx_exclude;
/*
 * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
 */
void
cpuid_alloc_space(cpu_t *cpu)
{
	/*
	 * By convention, cpu0 is the boot cpu, which is set up
	 * before memory allocation is available. All other cpus get
	 * their cpuid_info struct allocated here.
	 */
	ASSERT(cpu->cpu_id != 0);
	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
	cpu->cpu_m.mcpu_cpi =
	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
}
void
cpuid_free_space(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	int i;

	ASSERT(cpi != &cpuid_info0);

	/*
	 * Free up any function 4 related dynamic storage
	 */
	for (i = 1; i < cpi->cpi_std_4_size; i++)
		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
	if (cpi->cpi_std_4_size > 0)
		kmem_free(cpi->cpi_std_4,
		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));

	kmem_free(cpi, sizeof (*cpi));
	cpu->cpu_m.mcpu_cpi = NULL;
}
/*
 * Determine the type of the underlying platform. This is used to customize
 * initialization of various subsystems (e.g. TSC). determine_platform() must
 * only ever be called once to prevent two processors from seeing different
 * values of platform_type. Must be called before cpuid_pass1(), the earliest
 * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
 */
void
determine_platform(void)
{
669 struct cpuid_regs cp
;
672 char *hvstr
= (char *)regs
;
674 ASSERT(platform_type
== -1);
676 platform_type
= HW_NATIVE
;
678 if (!enable_platform_detection
)
682 * If Hypervisor CPUID bit is set, try to determine hypervisor
683 * vendor signature, and set platform type accordingly.
686 * http://lkml.org/lkml/2008/10/1/246
687 * http://kb.vmware.com/kb/1009458
690 (void) __cpuid_insn(&cp
);
691 if ((cp
.cp_ecx
& CPUID_INTC_ECX_HV
) != 0) {
692 cp
.cp_eax
= 0x40000000;
693 (void) __cpuid_insn(&cp
);
698 if (strcmp(hvstr
, HVSIG_XEN_HVM
) == 0) {
699 platform_type
= HW_XEN_HVM
;
702 if (strcmp(hvstr
, HVSIG_VMWARE
) == 0) {
703 platform_type
= HW_VMWARE
;
706 if (strcmp(hvstr
, HVSIG_KVM
) == 0) {
707 platform_type
= HW_KVM
;
710 if (strcmp(hvstr
, HVSIG_MICROSOFT
) == 0)
711 platform_type
= HW_MICROSOFT
;
714 * Check older VMware hardware versions. VMware hypervisor is
715 * detected by performing an IN operation to VMware hypervisor
716 * port and checking that value returned in %ebx is VMware
717 * hypervisor magic value.
719 * References: http://kb.vmware.com/kb/1009458
721 vmware_port(VMWARE_HVCMD_GETVERSION
, regs
);
722 if (regs
[1] == VMWARE_HVMAGIC
) {
723 platform_type
= HW_VMWARE
;
729 * Check Xen hypervisor. In a fully virtualized domain,
730 * Xen's pseudo-cpuid function returns a string representing the
731 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
732 * supported cpuid function. We need at least a (base + 2) leaf value
733 * to do what we want to do. Try different base values, since the
734 * hypervisor might use a different one depending on whether Hyper-V
735 * emulation is switched on by default or not.
737 for (base
= 0x40000000; base
< 0x40010000; base
+= 0x100) {
739 (void) __cpuid_insn(&cp
);
744 if (strcmp(hvstr
, HVSIG_XEN_HVM
) == 0 &&
745 cp
.cp_eax
>= (base
+ 2)) {
746 platform_type
&= ~HW_NATIVE
;
747 platform_type
|= HW_XEN_HVM
;
756 ASSERT(platform_type
!= -1);
757 return (platform_type
);
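
/*
 * Sketch (illustrative, simplified from the detection logic above; the
 * CPUID_INTC_ECX_HV gate and error handling are omitted): reading a
 * hypervisor vendor signature string out of leaf 0x40000000, where the
 * 12-byte signature comes back in %ebx, %ecx and %edx.
 */
static void
hv_signature_example(char sig[13])
{
	struct cpuid_regs cp;

	cp.cp_eax = 0x40000000;
	(void) __cpuid_insn(&cp);
	bcopy(&cp.cp_ebx, &sig[0], 4);
	bcopy(&cp.cp_ecx, &sig[4], 4);
	bcopy(&cp.cp_edx, &sig[8], 4);
	sig[12] = '\0';
}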
777 return (DOMAIN_IS_INITDOMAIN(xen_info
));
783 cpuid_intel_getids(cpu_t
*cpu
, void *feature
)
786 uint_t chipid_shift
= 0;
787 uint_t coreid_shift
= 0;
788 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
790 for (i
= 1; i
< cpi
->cpi_ncpu_per_chip
; i
<<= 1)
793 cpi
->cpi_chipid
= cpi
->cpi_apicid
>> chipid_shift
;
794 cpi
->cpi_clogid
= cpi
->cpi_apicid
& ((1 << chipid_shift
) - 1);
796 if (is_x86_feature(feature
, X86FSET_CMP
)) {
798 * Multi-core (and possibly multi-threaded)
801 uint_t ncpu_per_core
;
802 if (cpi
->cpi_ncore_per_chip
== 1)
803 ncpu_per_core
= cpi
->cpi_ncpu_per_chip
;
804 else if (cpi
->cpi_ncore_per_chip
> 1)
805 ncpu_per_core
= cpi
->cpi_ncpu_per_chip
/
806 cpi
->cpi_ncore_per_chip
;
808 * 8bit APIC IDs on dual core Pentiums
811 * +-----------------------+------+------+
812 * | Physical Package ID | MC | HT |
813 * +-----------------------+------+------+
814 * <------- chipid -------->
815 * <------- coreid --------------->
820 * Where the number of bits necessary to
821 * represent MC and HT fields together equals
822 * to the minimum number of bits necessary to
823 * store the value of cpi->cpi_ncpu_per_chip.
824 * Of those bits, the MC part uses the number
825 * of bits necessary to store the value of
826 * cpi->cpi_ncore_per_chip.
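		/*
		 * Illustrative worked example (not original code): with
		 * cpi_ncpu_per_chip == 8 and cpi_ncore_per_chip == 4,
		 * chipid_shift is 3 and coreid_shift is 1, so APIC ID 0xb
		 * decomposes to chipid 1, clogid 3, coreid 5 and pkgcoreid 1.
		 */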
828 for (i
= 1; i
< ncpu_per_core
; i
<<= 1)
830 cpi
->cpi_coreid
= cpi
->cpi_apicid
>> coreid_shift
;
831 cpi
->cpi_pkgcoreid
= cpi
->cpi_clogid
>> coreid_shift
;
832 } else if (is_x86_feature(feature
, X86FSET_HTT
)) {
834 * Single-core multi-threaded processors.
836 cpi
->cpi_coreid
= cpi
->cpi_chipid
;
837 cpi
->cpi_pkgcoreid
= 0;
839 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
840 cpi
->cpi_compunitid
= cpi
->cpi_coreid
;
844 cpuid_amd_getids(cpu_t
*cpu
)
846 int i
, first_half
, coreidsz
;
847 uint32_t nb_caps_reg
;
849 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
850 struct cpuid_regs
*cp
;
	/*
	 * AMD CMP chips currently have a single thread per core.
	 *
	 * Since no two cpus share a core we must assign a distinct coreid
	 * per cpu, and we do this by using the cpu_id. This scheme does not,
	 * however, guarantee that sibling cores of a chip will have sequential
	 * coreids starting at a multiple of the number of cores per chip -
	 * that is usually the case, but if the ACPI MADT table is presented
	 * in a different order then we need to perform a few more gymnastics
	 * for the pkgcoreid.
	 *
	 * All processors in the system have the same number of enabled
	 * cores. Cores within a processor are always numbered sequentially
	 * from 0 regardless of how many or which are disabled, and there
	 * is no way for the operating system to discover the real core id
	 * when some are disabled.
	 *
	 * In family 0x15, the cores come in pairs called compute units. They
	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
	 * simplified by the new topology extensions CPUID leaf, indicated by
	 * the X86 feature X86FSET_TOPOEXT.
	 */
875 cpi
->cpi_coreid
= cpu
->cpu_id
;
876 cpi
->cpi_compunitid
= cpu
->cpu_id
;
878 if (cpi
->cpi_xmaxeax
>= 0x80000008) {
880 coreidsz
= BITX((cpi
)->cpi_extd
[8].cp_ecx
, 15, 12);
883 * In AMD parlance chip is really a node while Solaris
884 * sees chip as equivalent to socket/package.
886 cpi
->cpi_ncore_per_chip
=
887 BITX((cpi
)->cpi_extd
[8].cp_ecx
, 7, 0) + 1;
889 /* Use legacy method */
890 for (i
= 1; i
< cpi
->cpi_ncore_per_chip
; i
<<= 1)
896 /* Assume single-core part */
897 cpi
->cpi_ncore_per_chip
= 1;
901 cpi
->cpi_clogid
= cpi
->cpi_pkgcoreid
=
902 cpi
->cpi_apicid
& ((1<<coreidsz
) - 1);
903 cpi
->cpi_ncpu_per_chip
= cpi
->cpi_ncore_per_chip
;
905 /* Get node ID, compute unit ID */
906 if (is_x86_feature(x86_featureset
, X86FSET_TOPOEXT
) &&
907 cpi
->cpi_xmaxeax
>= 0x8000001e) {
908 cp
= &cpi
->cpi_extd
[0x1e];
909 cp
->cp_eax
= 0x8000001e;
910 (void) __cpuid_insn(cp
);
912 cpi
->cpi_procnodes_per_pkg
= BITX(cp
->cp_ecx
, 10, 8) + 1;
913 cpi
->cpi_procnodeid
= BITX(cp
->cp_ecx
, 7, 0);
914 cpi
->cpi_cores_per_compunit
= BITX(cp
->cp_ebx
, 15, 8) + 1;
915 cpi
->cpi_compunitid
= BITX(cp
->cp_ebx
, 7, 0)
916 + (cpi
->cpi_ncore_per_chip
/ cpi
->cpi_cores_per_compunit
)
917 * (cpi
->cpi_procnodeid
/ cpi
->cpi_procnodes_per_pkg
);
918 } else if (cpi
->cpi_family
== 0xf || cpi
->cpi_family
>= 0x11) {
919 cpi
->cpi_procnodeid
= (cpi
->cpi_apicid
>> coreidsz
) & 7;
920 } else if (cpi
->cpi_family
== 0x10) {
922 * See if we are a multi-node processor.
923 * All processors in the system have the same number of nodes
925 nb_caps_reg
= pci_getl_func(0, 24, 3, 0xe8);
926 if ((cpi
->cpi_model
< 8) || BITX(nb_caps_reg
, 29, 29) == 0) {
928 cpi
->cpi_procnodeid
= BITX(cpi
->cpi_apicid
, 5,
933 * Multi-node revision D (2 nodes per package
936 cpi
->cpi_procnodes_per_pkg
= 2;
938 first_half
= (cpi
->cpi_pkgcoreid
<=
939 (cpi
->cpi_ncore_per_chip
/2 - 1));
941 if (cpi
->cpi_apicid
== cpi
->cpi_pkgcoreid
) {
943 cpi
->cpi_procnodeid
= (first_half
? 0 : 1);
947 /* NodeId[2:1] bits to use for reading F3xe8 */
948 node2_1
= BITX(cpi
->cpi_apicid
, 5, 4) << 1;
951 pci_getl_func(0, 24 + node2_1
, 3, 0xe8);
954 * Check IntNodeNum bit (31:30, but bit 31 is
955 * always 0 on dual-node processors)
957 if (BITX(nb_caps_reg
, 30, 30) == 0)
958 cpi
->cpi_procnodeid
= node2_1
+
961 cpi
->cpi_procnodeid
= node2_1
+
966 cpi
->cpi_procnodeid
= 0;
970 cpi
->cpi_procnodeid
/ cpi
->cpi_procnodes_per_pkg
;
974 * Setup XFeature_Enabled_Mask register. Required by xsave feature.
979 uint64_t flags
= XFEATURE_LEGACY_FP
;
981 ASSERT(is_x86_feature(x86_featureset
, X86FSET_XSAVE
));
983 if (is_x86_feature(x86_featureset
, X86FSET_SSE
))
984 flags
|= XFEATURE_SSE
;
986 if (is_x86_feature(x86_featureset
, X86FSET_AVX
))
987 flags
|= XFEATURE_AVX
;
989 if (is_x86_feature(x86_featureset
, X86FSET_AVX512F
))
990 flags
|= XFEATURE_AVX512
;
992 set_xcr(XFEATURE_ENABLED_MASK
, flags
);
994 xsave_bv_all
= flags
;
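
/*
 * Illustrative note (not original code): on a CPU with SSE and AVX but no
 * AVX-512, the mask written to XCR0 above is
 * XFEATURE_LEGACY_FP | XFEATURE_SSE | XFEATURE_AVX == 0x7.
 */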
998 cpuid_pass1(cpu_t
*cpu
, uchar_t
*featureset
)
1000 uint32_t mask_ecx
, mask_edx
;
1001 struct cpuid_info
*cpi
;
1002 struct cpuid_regs
*cp
;
1005 extern int idle_cpu_prefer_mwait
;
1009 * Space statically allocated for BSP, ensure pointer is set
1011 if (cpu
->cpu_id
== 0) {
1012 if (cpu
->cpu_m
.mcpu_cpi
== NULL
)
1013 cpu
->cpu_m
.mcpu_cpi
= &cpuid_info0
;
1016 add_x86_feature(featureset
, X86FSET_CPUID
);
1018 cpi
= cpu
->cpu_m
.mcpu_cpi
;
1019 ASSERT(cpi
!= NULL
);
1020 cp
= &cpi
->cpi_std
[0];
1022 cpi
->cpi_maxeax
= __cpuid_insn(cp
);
1024 uint32_t *iptr
= (uint32_t *)cpi
->cpi_vendorstr
;
1025 *iptr
++ = cp
->cp_ebx
;
1026 *iptr
++ = cp
->cp_edx
;
1027 *iptr
++ = cp
->cp_ecx
;
1028 *(char *)&cpi
->cpi_vendorstr
[12] = '\0';
1031 cpi
->cpi_vendor
= _cpuid_vendorstr_to_vendorcode(cpi
->cpi_vendorstr
);
1032 x86_vendor
= cpi
->cpi_vendor
; /* for compatibility */
1035 * Limit the range in case of weird hardware
1037 if (cpi
->cpi_maxeax
> CPI_MAXEAX_MAX
)
1038 cpi
->cpi_maxeax
= CPI_MAXEAX_MAX
;
1039 if (cpi
->cpi_maxeax
< 1)
1042 cp
= &cpi
->cpi_std
[1];
1044 (void) __cpuid_insn(cp
);
1047 * Extract identifying constants for easy access.
1049 cpi
->cpi_model
= CPI_MODEL(cpi
);
1050 cpi
->cpi_family
= CPI_FAMILY(cpi
);
1052 if (cpi
->cpi_family
== 0xf)
1053 cpi
->cpi_family
+= CPI_FAMILY_XTD(cpi
);
1056 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
1057 * Intel, and presumably everyone else, uses model == 0xf, as
1058 * one would expect (max value means possible overflow). Sigh.
1061 switch (cpi
->cpi_vendor
) {
1062 case X86_VENDOR_Intel
:
1063 if (IS_EXTENDED_MODEL_INTEL(cpi
))
1064 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1066 case X86_VENDOR_AMD
:
1067 if (CPI_FAMILY(cpi
) == 0xf)
1068 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1071 if (cpi
->cpi_model
== 0xf)
1072 cpi
->cpi_model
+= CPI_MODEL_XTD(cpi
) << 4;
1076 cpi
->cpi_step
= CPI_STEP(cpi
);
1077 cpi
->cpi_brandid
= CPI_BRANDID(cpi
);
1080 * *default* assumptions:
1081 * - believe %edx feature word
1082 * - ignore %ecx feature word
1083 * - 32-bit virtual and physical addressing
1085 mask_edx
= 0xffffffff;
1088 cpi
->cpi_pabits
= cpi
->cpi_vabits
= 32;
1090 switch (cpi
->cpi_vendor
) {
1091 case X86_VENDOR_Intel
:
1092 if (cpi
->cpi_family
== 5)
1093 x86_type
= X86_TYPE_P5
;
1094 else if (IS_LEGACY_P6(cpi
)) {
1095 x86_type
= X86_TYPE_P6
;
1096 pentiumpro_bug4046376
= 1;
1098 * Clear the SEP bit when it was set erroneously
1100 if (cpi
->cpi_model
< 3 && cpi
->cpi_step
< 3)
1101 cp
->cp_edx
&= ~CPUID_INTC_EDX_SEP
;
1102 } else if (IS_NEW_F6(cpi
) || cpi
->cpi_family
== 0xf) {
1103 x86_type
= X86_TYPE_P4
;
1105 * We don't currently depend on any of the %ecx
1106 * features until Prescott, so we'll only check
1107 * this from P4 onwards. We might want to revisit
1110 mask_ecx
= 0xffffffff;
1111 } else if (cpi
->cpi_family
> 0xf)
1112 mask_ecx
= 0xffffffff;
1114 * We don't support MONITOR/MWAIT if leaf 5 is not available
1115 * to obtain the monitor linesize.
1117 if (cpi
->cpi_maxeax
< 5)
1118 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1120 case X86_VENDOR_IntelClone
:
1123 case X86_VENDOR_AMD
:
1124 #if defined(OPTERON_ERRATUM_108)
1125 if (cpi
->cpi_family
== 0xf && cpi
->cpi_model
== 0xe) {
1126 cp
->cp_eax
= (0xf0f & cp
->cp_eax
) | 0xc0;
1127 cpi
->cpi_model
= 0xc;
1130 if (cpi
->cpi_family
== 5) {
1134 * These CPUs have an incomplete implementation
1135 * of MCA/MCE which we mask away.
1137 mask_edx
&= ~(CPUID_INTC_EDX_MCE
| CPUID_INTC_EDX_MCA
);
1140 * Model 0 uses the wrong (APIC) bit
1141 * to indicate PGE. Fix it here.
1143 if (cpi
->cpi_model
== 0) {
1144 if (cp
->cp_edx
& 0x200) {
1145 cp
->cp_edx
&= ~0x200;
1146 cp
->cp_edx
|= CPUID_INTC_EDX_PGE
;
1151 * Early models had problems w/ MMX; disable.
1153 if (cpi
->cpi_model
< 6)
1154 mask_edx
&= ~CPUID_INTC_EDX_MMX
;
1158 * For newer families, SSE3 and CX16, at least, are valid;
1161 if (cpi
->cpi_family
>= 0xf)
1162 mask_ecx
= 0xffffffff;
1164 * We don't support MONITOR/MWAIT if leaf 5 is not available
1165 * to obtain the monitor linesize.
1167 if (cpi
->cpi_maxeax
< 5)
1168 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1172 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1173 * processors. AMD does not intend MWAIT to be used in the cpu
1174 * idle loop on current and future processors. 10h and future
1175 * AMD processors use more power in MWAIT than HLT.
1176 * Pre-family-10h Opterons do not have the MWAIT instruction.
1178 idle_cpu_prefer_mwait
= 0;
1184 * workaround the NT workaround in CMS 4.1
1186 if (cpi
->cpi_family
== 5 && cpi
->cpi_model
== 4 &&
1187 (cpi
->cpi_step
== 2 || cpi
->cpi_step
== 3))
1188 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1190 case X86_VENDOR_Centaur
:
1192 * workaround the NT workarounds again
1194 if (cpi
->cpi_family
== 6)
1195 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1197 case X86_VENDOR_Cyrix
:
1199 * We rely heavily on the probing in locore
1200 * to actually figure out what parts, if any,
1201 * of the Cyrix cpuid instruction to believe.
1204 case X86_TYPE_CYRIX_486
:
1207 case X86_TYPE_CYRIX_6x86
:
1210 case X86_TYPE_CYRIX_6x86L
:
1215 case X86_TYPE_CYRIX_6x86MX
:
1218 CPUID_INTC_EDX_MSR
|
1219 CPUID_INTC_EDX_CX8
|
1220 CPUID_INTC_EDX_PGE
|
1221 CPUID_INTC_EDX_CMOV
|
1224 case X86_TYPE_CYRIX_GXm
:
1226 CPUID_INTC_EDX_MSR
|
1227 CPUID_INTC_EDX_CX8
|
1228 CPUID_INTC_EDX_CMOV
|
1231 case X86_TYPE_CYRIX_MediaGX
:
1233 case X86_TYPE_CYRIX_MII
:
1234 case X86_TYPE_VIA_CYRIX_III
:
1237 CPUID_INTC_EDX_TSC
|
1238 CPUID_INTC_EDX_MSR
|
1239 CPUID_INTC_EDX_CX8
|
1240 CPUID_INTC_EDX_PGE
|
1241 CPUID_INTC_EDX_CMOV
|
1252 * Do not support MONITOR/MWAIT under a hypervisor
1254 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1256 * Do not support XSAVE under a hypervisor for now
1258 xsave_force_disable
= B_TRUE
;
1262 if (xsave_force_disable
) {
1263 mask_ecx
&= ~CPUID_INTC_ECX_XSAVE
;
1264 mask_ecx
&= ~CPUID_INTC_ECX_AVX
;
1265 mask_ecx
&= ~CPUID_INTC_ECX_F16C
;
1266 mask_ecx
&= ~CPUID_INTC_ECX_FMA
;
1270 * Now we've figured out the masks that determine
1271 * which bits we choose to believe, apply the masks
1272 * to the feature words, then map the kernel's view
1273 * of these feature words into its feature word.
1275 cp
->cp_edx
&= mask_edx
;
1276 cp
->cp_ecx
&= mask_ecx
;
1279 * apply any platform restrictions (we don't call this
1280 * immediately after __cpuid_insn here, because we need the
1281 * workarounds applied above first)
1283 platform_cpuid_mangle(cpi
->cpi_vendor
, 1, cp
);
1286 * In addition to ecx and edx, Intel is storing a bunch of instruction
1287 * set extensions in leaf 7's ebx, ecx, and edx.
1289 if (cpi
->cpi_vendor
== X86_VENDOR_Intel
&& cpi
->cpi_maxeax
>= 7) {
1290 struct cpuid_regs
*ecp
;
1291 ecp
= &cpi
->cpi_std
[7];
1294 (void) __cpuid_insn(ecp
);
1296 * If XSAVE has been disabled, just ignore all of the
1297 * extended-save-area dependent flags here.
1299 if (xsave_force_disable
) {
1300 ecp
->cp_ebx
&= ~CPUID_INTC_EBX_7_0_BMI1
;
1301 ecp
->cp_ebx
&= ~CPUID_INTC_EBX_7_0_BMI2
;
1302 ecp
->cp_ebx
&= ~CPUID_INTC_EBX_7_0_AVX2
;
1303 ecp
->cp_ebx
&= ~CPUID_INTC_EBX_7_0_MPX
;
1304 ecp
->cp_ebx
&= ~CPUID_INTC_EBX_7_0_ALL_AVX512
;
1305 ecp
->cp_ecx
&= ~CPUID_INTC_ECX_7_0_ALL_AVX512
;
1306 ecp
->cp_edx
&= ~CPUID_INTC_EDX_7_0_ALL_AVX512
;
1309 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_SMEP
)
1310 add_x86_feature(featureset
, X86FSET_SMEP
);
1312 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_INVPCID
) {
1313 add_x86_feature(featureset
, X86FSET_INVPCID
);
1317 * We check disable_smap here in addition to in startup_smap()
1318 * to ensure CPUs that aren't the boot CPU don't accidentally
1319 * include it in the feature set and thus generate a mismatched
1320 * x86 feature set across CPUs. Note that at this time we only
1321 * enable SMAP for the 64-bit kernel.
1323 #if defined(__amd64)
1324 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_SMAP
&&
1326 add_x86_feature(featureset
, X86FSET_SMAP
);
1328 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_MPX
)
1329 add_x86_feature(featureset
, X86FSET_MPX
);
1331 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_RDSEED
)
1332 add_x86_feature(featureset
, X86FSET_RDSEED
);
1334 if (ecp
->cp_ebx
& CPUID_INTC_EBX_7_0_ADX
)
1335 add_x86_feature(featureset
, X86FSET_ADX
);
1339 * fold in overrides from the "eeprom" mechanism
1341 cp
->cp_edx
|= cpuid_feature_edx_include
;
1342 cp
->cp_edx
&= ~cpuid_feature_edx_exclude
;
1344 cp
->cp_ecx
|= cpuid_feature_ecx_include
;
1345 cp
->cp_ecx
&= ~cpuid_feature_ecx_exclude
;
1347 if (cp
->cp_edx
& CPUID_INTC_EDX_PSE
) {
1348 add_x86_feature(featureset
, X86FSET_LARGEPAGE
);
1350 if (cp
->cp_edx
& CPUID_INTC_EDX_TSC
) {
1351 add_x86_feature(featureset
, X86FSET_TSC
);
1353 if (cp
->cp_edx
& CPUID_INTC_EDX_MSR
) {
1354 add_x86_feature(featureset
, X86FSET_MSR
);
1356 if (cp
->cp_edx
& CPUID_INTC_EDX_MTRR
) {
1357 add_x86_feature(featureset
, X86FSET_MTRR
);
1359 if (cp
->cp_edx
& CPUID_INTC_EDX_PGE
) {
1360 add_x86_feature(featureset
, X86FSET_PGE
);
1362 if (cp
->cp_edx
& CPUID_INTC_EDX_CMOV
) {
1363 add_x86_feature(featureset
, X86FSET_CMOV
);
1365 if (cp
->cp_edx
& CPUID_INTC_EDX_MMX
) {
1366 add_x86_feature(featureset
, X86FSET_MMX
);
1368 if ((cp
->cp_edx
& CPUID_INTC_EDX_MCE
) != 0 &&
1369 (cp
->cp_edx
& CPUID_INTC_EDX_MCA
) != 0) {
1370 add_x86_feature(featureset
, X86FSET_MCA
);
1372 if (cp
->cp_edx
& CPUID_INTC_EDX_PAE
) {
1373 add_x86_feature(featureset
, X86FSET_PAE
);
1375 if (cp
->cp_edx
& CPUID_INTC_EDX_CX8
) {
1376 add_x86_feature(featureset
, X86FSET_CX8
);
1378 if (cp
->cp_ecx
& CPUID_INTC_ECX_CX16
) {
1379 add_x86_feature(featureset
, X86FSET_CX16
);
1381 if (cp
->cp_edx
& CPUID_INTC_EDX_PAT
) {
1382 add_x86_feature(featureset
, X86FSET_PAT
);
1384 if (cp
->cp_edx
& CPUID_INTC_EDX_SEP
) {
1385 add_x86_feature(featureset
, X86FSET_SEP
);
1387 if (cp
->cp_edx
& CPUID_INTC_EDX_FXSR
) {
1389 * In our implementation, fxsave/fxrstor
1390 * are prerequisites before we'll even
1391 * try and do SSE things.
1393 if (cp
->cp_edx
& CPUID_INTC_EDX_SSE
) {
1394 add_x86_feature(featureset
, X86FSET_SSE
);
1396 if (cp
->cp_edx
& CPUID_INTC_EDX_SSE2
) {
1397 add_x86_feature(featureset
, X86FSET_SSE2
);
1399 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE3
) {
1400 add_x86_feature(featureset
, X86FSET_SSE3
);
1402 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSSE3
) {
1403 add_x86_feature(featureset
, X86FSET_SSSE3
);
1405 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE4_1
) {
1406 add_x86_feature(featureset
, X86FSET_SSE4_1
);
1408 if (cp
->cp_ecx
& CPUID_INTC_ECX_SSE4_2
) {
1409 add_x86_feature(featureset
, X86FSET_SSE4_2
);
1411 if (cp
->cp_ecx
& CPUID_INTC_ECX_AES
) {
1412 add_x86_feature(featureset
, X86FSET_AES
);
1414 if (cp
->cp_ecx
& CPUID_INTC_ECX_PCLMULQDQ
) {
1415 add_x86_feature(featureset
, X86FSET_PCLMULQDQ
);
1418 if (cpi
->cpi_std
[7].cp_ebx
& CPUID_INTC_EBX_7_0_SHA
)
1419 add_x86_feature(featureset
, X86FSET_SHA
);
1421 if (cpi
->cpi_std
[7].cp_ecx
& CPUID_INTC_ECX_7_0_UMIP
)
1422 add_x86_feature(featureset
, X86FSET_UMIP
);
1423 if (cpi
->cpi_std
[7].cp_ecx
& CPUID_INTC_ECX_7_0_PKU
)
1424 add_x86_feature(featureset
, X86FSET_PKU
);
1425 if (cpi
->cpi_std
[7].cp_ecx
& CPUID_INTC_ECX_7_0_OSPKE
)
1426 add_x86_feature(featureset
, X86FSET_OSPKE
);
1428 if (cp
->cp_ecx
& CPUID_INTC_ECX_XSAVE
) {
1429 add_x86_feature(featureset
, X86FSET_XSAVE
);
1431 /* We only test AVX & AVX512 when there is XSAVE */
1433 if (cp
->cp_ecx
& CPUID_INTC_ECX_AVX
) {
1434 add_x86_feature(featureset
,
1438 * Intel says we can't check these without also
1441 if (cp
->cp_ecx
& CPUID_INTC_ECX_F16C
)
1442 add_x86_feature(featureset
,
1445 if (cp
->cp_ecx
& CPUID_INTC_ECX_FMA
)
1446 add_x86_feature(featureset
,
1449 if (cpi
->cpi_std
[7].cp_ebx
&
1450 CPUID_INTC_EBX_7_0_BMI1
)
1451 add_x86_feature(featureset
,
1454 if (cpi
->cpi_std
[7].cp_ebx
&
1455 CPUID_INTC_EBX_7_0_BMI2
)
1456 add_x86_feature(featureset
,
1459 if (cpi
->cpi_std
[7].cp_ebx
&
1460 CPUID_INTC_EBX_7_0_AVX2
)
1461 add_x86_feature(featureset
,
1465 if (cpi
->cpi_std
[7].cp_ebx
&
1466 CPUID_INTC_EBX_7_0_AVX512F
) {
1467 add_x86_feature(featureset
, X86FSET_AVX512F
);
1469 if (cpi
->cpi_std
[7].cp_ebx
&
1470 CPUID_INTC_EBX_7_0_AVX512DQ
)
1471 add_x86_feature(featureset
,
1473 if (cpi
->cpi_std
[7].cp_ebx
&
1474 CPUID_INTC_EBX_7_0_AVX512IFMA
)
1475 add_x86_feature(featureset
,
1477 if (cpi
->cpi_std
[7].cp_ebx
&
1478 CPUID_INTC_EBX_7_0_AVX512PF
)
1479 add_x86_feature(featureset
,
1481 if (cpi
->cpi_std
[7].cp_ebx
&
1482 CPUID_INTC_EBX_7_0_AVX512ER
)
1483 add_x86_feature(featureset
,
1485 if (cpi
->cpi_std
[7].cp_ebx
&
1486 CPUID_INTC_EBX_7_0_AVX512CD
)
1487 add_x86_feature(featureset
,
1489 if (cpi
->cpi_std
[7].cp_ebx
&
1490 CPUID_INTC_EBX_7_0_AVX512BW
)
1491 add_x86_feature(featureset
,
1493 if (cpi
->cpi_std
[7].cp_ebx
&
1494 CPUID_INTC_EBX_7_0_AVX512VL
)
1495 add_x86_feature(featureset
,
1498 if (cpi
->cpi_std
[7].cp_ecx
&
1499 CPUID_INTC_ECX_7_0_AVX512VBMI
)
1500 add_x86_feature(featureset
,
1501 X86FSET_AVX512VBMI
);
1502 if (cpi
->cpi_std
[7].cp_ecx
&
1503 CPUID_INTC_ECX_7_0_AVX512VPOPCDQ
)
1504 add_x86_feature(featureset
,
1505 X86FSET_AVX512VPOPCDQ
);
1507 if (cpi
->cpi_std
[7].cp_edx
&
1508 CPUID_INTC_EDX_7_0_AVX5124NNIW
)
1509 add_x86_feature(featureset
,
1510 X86FSET_AVX512NNIW
);
1511 if (cpi
->cpi_std
[7].cp_edx
&
1512 CPUID_INTC_EDX_7_0_AVX5124FMAPS
)
1513 add_x86_feature(featureset
,
1514 X86FSET_AVX512FMAPS
);
1519 if (cpi
->cpi_vendor
== X86_VENDOR_Intel
) {
1520 if (cp
->cp_ecx
& CPUID_INTC_ECX_PCID
) {
1521 add_x86_feature(featureset
, X86FSET_PCID
);
1525 if (cp
->cp_ecx
& CPUID_INTC_ECX_X2APIC
) {
1526 add_x86_feature(featureset
, X86FSET_X2APIC
);
1528 if (cp
->cp_edx
& CPUID_INTC_EDX_DE
) {
1529 add_x86_feature(featureset
, X86FSET_DE
);
1532 if (cp
->cp_ecx
& CPUID_INTC_ECX_MON
) {
1535 * We require the CLFLUSH instruction for erratum workaround
1536 * to use MONITOR/MWAIT.
1538 if (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
) {
1539 cpi
->cpi_mwait
.support
|= MWAIT_SUPPORT
;
1540 add_x86_feature(featureset
, X86FSET_MWAIT
);
1542 extern int idle_cpu_assert_cflush_monitor
;
1545 * All processors we are aware of which have
1546 * MONITOR/MWAIT also have CLFLUSH.
1548 if (idle_cpu_assert_cflush_monitor
) {
1549 ASSERT((cp
->cp_ecx
& CPUID_INTC_ECX_MON
) &&
1550 (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
));
1556 if (cp
->cp_ecx
& CPUID_INTC_ECX_VMX
) {
1557 add_x86_feature(featureset
, X86FSET_VMX
);
1560 if (cp
->cp_ecx
& CPUID_INTC_ECX_RDRAND
)
1561 add_x86_feature(featureset
, X86FSET_RDRAND
);
	 * Only need it the first time; the rest of the CPUs would follow suit.
	 * We only capture this for the boot CPU.
1567 if (cp
->cp_edx
& CPUID_INTC_EDX_CLFSH
) {
1568 add_x86_feature(featureset
, X86FSET_CLFSH
);
1569 x86_clflush_size
= (BITX(cp
->cp_ebx
, 15, 8) * 8);
1571 if (is_x86_feature(featureset
, X86FSET_PAE
))
1572 cpi
->cpi_pabits
= 36;
1575 * Hyperthreading configuration is slightly tricky on Intel
1576 * and pure clones, and even trickier on AMD.
1578 * (AMD chose to set the HTT bit on their CMP processors,
1579 * even though they're not actually hyperthreaded. Thus it
1580 * takes a bit more work to figure out what's really going
1581 * on ... see the handling of the CMP_LGCY bit below)
1583 if (cp
->cp_edx
& CPUID_INTC_EDX_HTT
) {
1584 cpi
->cpi_ncpu_per_chip
= CPI_CPU_COUNT(cpi
);
1585 if (cpi
->cpi_ncpu_per_chip
> 1)
1586 add_x86_feature(featureset
, X86FSET_HTT
);
1588 cpi
->cpi_ncpu_per_chip
= 1;
1591 if (cpi
->cpi_vendor
== X86_VENDOR_Intel
&& cpi
->cpi_maxeax
>= 0xD &&
1592 !xsave_force_disable
) {
1593 struct cpuid_regs r
, *ecp
;
1598 ecp
->cp_edx
= ecp
->cp_ebx
= 0;
1599 (void) __cpuid_insn(ecp
);
1601 if (ecp
->cp_eax
& CPUID_INTC_EAX_D_1_XSAVEOPT
)
1602 add_x86_feature(featureset
, X86FSET_XSAVEOPT
);
1603 if (ecp
->cp_eax
& CPUID_INTC_EAX_D_1_XSAVEC
)
1604 add_x86_feature(featureset
, X86FSET_XSAVEC
);
1605 if (ecp
->cp_eax
& CPUID_INTC_EAX_D_1_XSAVES
)
1606 add_x86_feature(featureset
, X86FSET_XSAVES
);
1610 * Work on the "extended" feature information, doing
1611 * some basic initialization for cpuid_pass2()
1614 switch (cpi
->cpi_vendor
) {
1615 case X86_VENDOR_Intel
:
1617 * On KVM we know we will have proper support for extended
1620 if (IS_NEW_F6(cpi
) || cpi
->cpi_family
>= 0xf ||
1621 (get_hwenv() == HW_KVM
&& cpi
->cpi_family
== 6 &&
1622 (cpi
->cpi_model
== 6 || cpi
->cpi_model
== 2)))
1625 case X86_VENDOR_AMD
:
1626 if (cpi
->cpi_family
> 5 ||
1627 (cpi
->cpi_family
== 5 && cpi
->cpi_model
>= 1))
1630 case X86_VENDOR_Cyrix
:
1632 * Only these Cyrix CPUs are -known- to support
1633 * extended cpuid operations.
1635 if (x86_type
== X86_TYPE_VIA_CYRIX_III
||
1636 x86_type
== X86_TYPE_CYRIX_GXm
)
1639 case X86_VENDOR_Centaur
:
1647 cp
= &cpi
->cpi_extd
[0];
1648 cp
->cp_eax
= 0x80000000;
1649 cpi
->cpi_xmaxeax
= __cpuid_insn(cp
);
1652 if (cpi
->cpi_xmaxeax
& 0x80000000) {
1654 if (cpi
->cpi_xmaxeax
> CPI_XMAXEAX_MAX
)
1655 cpi
->cpi_xmaxeax
= CPI_XMAXEAX_MAX
;
1657 switch (cpi
->cpi_vendor
) {
1658 case X86_VENDOR_Intel
:
1659 case X86_VENDOR_AMD
:
1660 if (cpi
->cpi_xmaxeax
< 0x80000001)
1662 cp
= &cpi
->cpi_extd
[1];
1663 cp
->cp_eax
= 0x80000001;
1664 (void) __cpuid_insn(cp
);
1666 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
&&
1667 cpi
->cpi_family
== 5 &&
1668 cpi
->cpi_model
== 6 &&
1669 cpi
->cpi_step
== 6) {
1671 * K6 model 6 uses bit 10 to indicate SYSC
1672 * Later models use bit 11. Fix it here.
1674 if (cp
->cp_edx
& 0x400) {
1675 cp
->cp_edx
&= ~0x400;
1676 cp
->cp_edx
|= CPUID_AMD_EDX_SYSC
;
1680 platform_cpuid_mangle(cpi
->cpi_vendor
, 0x80000001, cp
);
1683 * Compute the additions to the kernel's feature word.
1685 if (cp
->cp_edx
& CPUID_AMD_EDX_NX
) {
1686 add_x86_feature(featureset
, X86FSET_NX
);
1690 * Regardless whether or not we boot 64-bit,
1691 * we should have a way to identify whether
1692 * the CPU is capable of running 64-bit.
1694 if (cp
->cp_edx
& CPUID_AMD_EDX_LM
) {
1695 add_x86_feature(featureset
, X86FSET_64
);
1698 #if defined(__amd64)
1699 /* 1 GB large page - enable only for 64 bit kernel */
1700 if (cp
->cp_edx
& CPUID_AMD_EDX_1GPG
) {
1701 add_x86_feature(featureset
, X86FSET_1GPG
);
1705 if ((cpi
->cpi_vendor
== X86_VENDOR_AMD
) &&
1706 (cpi
->cpi_std
[1].cp_edx
& CPUID_INTC_EDX_FXSR
) &&
1707 (cp
->cp_ecx
& CPUID_AMD_ECX_SSE4A
)) {
1708 add_x86_feature(featureset
, X86FSET_SSE4A
);
1712 * If both the HTT and CMP_LGCY bits are set,
1713 * then we're not actually HyperThreaded. Read
1714 * "AMD CPUID Specification" for more details.
1716 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
&&
1717 is_x86_feature(featureset
, X86FSET_HTT
) &&
1718 (cp
->cp_ecx
& CPUID_AMD_ECX_CMP_LGCY
)) {
1719 remove_x86_feature(featureset
, X86FSET_HTT
);
1720 add_x86_feature(featureset
, X86FSET_CMP
);
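			/*
			 * Illustrative note (an assumption about one concrete
			 * part, not original code): a dual-core, non-HT
			 * Athlon 64 X2 sets HTT with CPI_CPU_COUNT() == 2 and
			 * also sets CMP_LGCY, so it ends up here with
			 * X86FSET_CMP and without X86FSET_HTT.
			 */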
1722 #if defined(__amd64)
1724 * It's really tricky to support syscall/sysret in
1725 * the i386 kernel; we rely on sysenter/sysexit
1726 * instead. In the amd64 kernel, things are -way-
1729 if (cp
->cp_edx
& CPUID_AMD_EDX_SYSC
) {
1730 add_x86_feature(featureset
, X86FSET_ASYSC
);
1734 * While we're thinking about system calls, note
1735 * that AMD processors don't support sysenter
1736 * in long mode at all, so don't try to program them.
1738 if (x86_vendor
== X86_VENDOR_AMD
) {
1739 remove_x86_feature(featureset
, X86FSET_SEP
);
1742 if (cp
->cp_edx
& CPUID_AMD_EDX_TSCP
) {
1743 add_x86_feature(featureset
, X86FSET_TSCP
);
1746 if (cp
->cp_ecx
& CPUID_AMD_ECX_SVM
) {
1747 add_x86_feature(featureset
, X86FSET_SVM
);
1750 if (cp
->cp_ecx
& CPUID_AMD_ECX_TOPOEXT
) {
1751 add_x86_feature(featureset
, X86FSET_TOPOEXT
);
1759 * Get CPUID data about processor cores and hyperthreads.
1761 switch (cpi
->cpi_vendor
) {
1762 case X86_VENDOR_Intel
:
1763 if (cpi
->cpi_maxeax
>= 4) {
1764 cp
= &cpi
->cpi_std
[4];
1767 (void) __cpuid_insn(cp
);
1768 platform_cpuid_mangle(cpi
->cpi_vendor
, 4, cp
);
1771 case X86_VENDOR_AMD
:
1772 if (cpi
->cpi_xmaxeax
< 0x80000008)
1774 cp
= &cpi
->cpi_extd
[8];
1775 cp
->cp_eax
= 0x80000008;
1776 (void) __cpuid_insn(cp
);
1777 platform_cpuid_mangle(cpi
->cpi_vendor
, 0x80000008, cp
);
1780 * Virtual and physical address limits from
1781 * cpuid override previously guessed values.
1783 cpi
->cpi_pabits
= BITX(cp
->cp_eax
, 7, 0);
1784 cpi
->cpi_vabits
= BITX(cp
->cp_eax
, 15, 8);
1791 * Derive the number of cores per chip
1793 switch (cpi
->cpi_vendor
) {
1794 case X86_VENDOR_Intel
:
1795 if (cpi
->cpi_maxeax
< 4) {
1796 cpi
->cpi_ncore_per_chip
= 1;
1799 cpi
->cpi_ncore_per_chip
=
1800 BITX((cpi
)->cpi_std
[4].cp_eax
, 31, 26) + 1;
1803 case X86_VENDOR_AMD
:
1804 if (cpi
->cpi_xmaxeax
< 0x80000008) {
1805 cpi
->cpi_ncore_per_chip
= 1;
1809 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
1810 * 1 less than the number of physical cores on
1811 * the chip. In family 0x10 this value can
1812 * be affected by "downcoring" - it reflects
1813 * 1 less than the number of cores actually
1814 * enabled on this node.
1816 cpi
->cpi_ncore_per_chip
=
1817 BITX((cpi
)->cpi_extd
[8].cp_ecx
, 7, 0) + 1;
1821 cpi
->cpi_ncore_per_chip
= 1;
1826 * Get CPUID data about TSC Invariance in Deep C-State.
1828 switch (cpi
->cpi_vendor
) {
1829 case X86_VENDOR_Intel
:
1830 if (cpi
->cpi_maxeax
>= 7) {
1831 cp
= &cpi
->cpi_extd
[7];
1832 cp
->cp_eax
= 0x80000007;
1834 (void) __cpuid_insn(cp
);
1841 cpi
->cpi_ncore_per_chip
= 1;
1845 * If more than one core, then this processor is CMP.
1847 if (cpi
->cpi_ncore_per_chip
> 1) {
1848 add_x86_feature(featureset
, X86FSET_CMP
);
1852 * If the number of cores is the same as the number
1853 * of CPUs, then we cannot have HyperThreading.
1855 if (cpi
->cpi_ncpu_per_chip
== cpi
->cpi_ncore_per_chip
) {
1856 remove_x86_feature(featureset
, X86FSET_HTT
);
1859 cpi
->cpi_apicid
= CPI_APIC_ID(cpi
);
1860 cpi
->cpi_procnodes_per_pkg
= 1;
1861 cpi
->cpi_cores_per_compunit
= 1;
1862 if (is_x86_feature(featureset
, X86FSET_HTT
) == B_FALSE
&&
1863 is_x86_feature(featureset
, X86FSET_CMP
) == B_FALSE
) {
1865 * Single-core single-threaded processors.
1867 cpi
->cpi_chipid
= -1;
1868 cpi
->cpi_clogid
= 0;
1869 cpi
->cpi_coreid
= cpu
->cpu_id
;
1870 cpi
->cpi_pkgcoreid
= 0;
1871 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
)
1872 cpi
->cpi_procnodeid
= BITX(cpi
->cpi_apicid
, 3, 0);
1874 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
1875 } else if (cpi
->cpi_ncpu_per_chip
> 1) {
1876 if (cpi
->cpi_vendor
== X86_VENDOR_Intel
)
1877 cpuid_intel_getids(cpu
, featureset
);
1878 else if (cpi
->cpi_vendor
== X86_VENDOR_AMD
)
1879 cpuid_amd_getids(cpu
);
1882 * All other processors are currently
1883 * assumed to have single cores.
1885 cpi
->cpi_coreid
= cpi
->cpi_chipid
;
1886 cpi
->cpi_pkgcoreid
= 0;
1887 cpi
->cpi_procnodeid
= cpi
->cpi_chipid
;
1888 cpi
->cpi_compunitid
= cpi
->cpi_chipid
;
1893 * Synthesize chip "revision" and socket type
1895 cpi
->cpi_chiprev
= _cpuid_chiprev(cpi
->cpi_vendor
, cpi
->cpi_family
,
1896 cpi
->cpi_model
, cpi
->cpi_step
);
1897 cpi
->cpi_chiprevstr
= _cpuid_chiprevstr(cpi
->cpi_vendor
,
1898 cpi
->cpi_family
, cpi
->cpi_model
, cpi
->cpi_step
);
1899 cpi
->cpi_socket
= _cpuid_skt(cpi
->cpi_vendor
, cpi
->cpi_family
,
1900 cpi
->cpi_model
, cpi
->cpi_step
);
1903 * While we're here, check for the AMD "Error Pointer Zero/Restore"
1904 * feature. This can be used to setup the FP save handlers
1907 if (cpi
->cpi_vendor
== X86_VENDOR_AMD
) {
1908 if (cpi
->cpi_xmaxeax
>= 0x80000008 &&
1909 cpi
->cpi_extd
[8].cp_ebx
& CPUID_AMD_EBX_ERR_PTR_ZERO
) {
1910 /* Special handling for AMD FP not necessary. */
1911 cpi
->cpi_fp_amd_save
= 0;
1913 cpi
->cpi_fp_amd_save
= 1;
1922 * Make copies of the cpuid table entries we depend on, in
1923 * part for ease of parsing now, in part so that we have only
1924 * one place to correct any of it, in part for ease of
1925 * later export to userland, and in part so we can look at
1926 * this stuff in a crash dump.
1931 cpuid_pass2(cpu_t
*cpu
)
1935 struct cpuid_regs
*cp
;
1938 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
1940 ASSERT(cpi
->cpi_pass
== 1);
1942 if (cpi
->cpi_maxeax
< 1)
1945 if ((nmax
= cpi
->cpi_maxeax
+ 1) > NMAX_CPI_STD
)
1946 nmax
= NMAX_CPI_STD
;
1948 * (We already handled n == 0 and n == 1 in pass 1)
1950 for (n
= 2, cp
= &cpi
->cpi_std
[2]; n
< nmax
; n
++, cp
++) {
1954 * CPUID function 4 expects %ecx to be initialized
1955 * with an index which indicates which cache to return
1956 * information about. The OS is expected to call function 4
1957 * with %ecx set to 0, 1, 2, ... until it returns with
1958 * EAX[4:0] set to 0, which indicates there are no more
1961 * Here, populate cpi_std[4] with the information returned by
1962 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1963 * when dynamic memory allocation becomes available.
1965 * Note: we need to explicitly initialize %ecx here, since
1966 * function 4 may have been previously invoked.
1968 * The same is all true for CPUID function 7.
1970 if (n
== 4 || n
== 7)
1973 (void) __cpuid_insn(cp
);
1974 platform_cpuid_mangle(cpi
->cpi_vendor
, n
, cp
);
1978 * "the lower 8 bits of the %eax register
1979 * contain a value that identifies the number
1980 * of times the cpuid [instruction] has to be
1981 * executed to obtain a complete image of the
1982 * processor's caching systems."
1984 * How *do* they make this stuff up?
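			/*
			 * Illustrative example (not original code): if leaf 2
			 * returns, say, %eax == 0x665b5001, the low byte 0x01
			 * means a single iteration suffices, and the remaining
			 * bytes are one-byte cache/TLB descriptors gathered
			 * into cpi_cacheinfo below.
			 */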
1986 cpi
->cpi_ncache
= sizeof (*cp
) *
1987 BITX(cp
->cp_eax
, 7, 0);
1988 if (cpi
->cpi_ncache
== 0)
1990 cpi
->cpi_ncache
--; /* skip count byte */
1993 * Well, for now, rather than attempt to implement
1994 * this slightly dubious algorithm, we just look
1995 * at the first 15 ..
1997 if (cpi
->cpi_ncache
> (sizeof (*cp
) - 1))
1998 cpi
->cpi_ncache
= sizeof (*cp
) - 1;
2000 dp
= cpi
->cpi_cacheinfo
;
2001 if (BITX(cp
->cp_eax
, 31, 31) == 0) {
2002 uint8_t *p
= (void *)&cp
->cp_eax
;
2003 for (i
= 1; i
< 4; i
++)
2007 if (BITX(cp
->cp_ebx
, 31, 31) == 0) {
2008 uint8_t *p
= (void *)&cp
->cp_ebx
;
2009 for (i
= 0; i
< 4; i
++)
2013 if (BITX(cp
->cp_ecx
, 31, 31) == 0) {
2014 uint8_t *p
= (void *)&cp
->cp_ecx
;
2015 for (i
= 0; i
< 4; i
++)
2019 if (BITX(cp
->cp_edx
, 31, 31) == 0) {
2020 uint8_t *p
= (void *)&cp
->cp_edx
;
2021 for (i
= 0; i
< 4; i
++)
2027 case 3: /* Processor serial number, if PSN supported */
2030 case 4: /* Deterministic cache parameters */
2033 case 5: /* Monitor/Mwait parameters */
2038 * check cpi_mwait.support which was set in cpuid_pass1
2040 if (!(cpi
->cpi_mwait
.support
& MWAIT_SUPPORT
))
2044 * Protect ourself from insane mwait line size.
2045 * Workaround for incomplete hardware emulator(s).
2047 mwait_size
= (size_t)MWAIT_SIZE_MAX(cpi
);
2048 if (mwait_size
< sizeof (uint32_t) ||
2049 !ISP2(mwait_size
)) {
2051 cmn_err(CE_NOTE
, "Cannot handle cpu %d mwait "
2052 "size %ld", cpu
->cpu_id
, (long)mwait_size
);
2057 cpi
->cpi_mwait
.mon_min
= (size_t)MWAIT_SIZE_MIN(cpi
);
2058 cpi
->cpi_mwait
.mon_max
= mwait_size
;
2059 if (MWAIT_EXTENSION(cpi
)) {
2060 cpi
->cpi_mwait
.support
|= MWAIT_EXTENSIONS
;
2061 if (MWAIT_INT_ENABLE(cpi
))
2062 cpi
->cpi_mwait
.support
|=
2063 MWAIT_ECX_INT_ENABLE
;
2072 if (cpi
->cpi_maxeax
>= 0xB && cpi
->cpi_vendor
== X86_VENDOR_Intel
) {
2073 struct cpuid_regs regs
;
2077 cp
->cp_edx
= cp
->cp_ebx
= cp
->cp_ecx
= 0;
2079 (void) __cpuid_insn(cp
);
2082 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
2083 * indicates that the extended topology enumeration leaf is
2088 uint_t coreid_shift
= 0;
2089 uint_t ncpu_per_core
= 1;
2090 uint_t chipid_shift
= 0;
2091 uint_t ncpu_per_chip
= 1;
2095 for (i
= 0; i
< CPI_FNB_ECX_MAX
; i
++) {
2099 (void) __cpuid_insn(cp
);
2100 level
= CPI_CPU_LEVEL_TYPE(cp
);
2103 x2apic_id
= cp
->cp_edx
;
2104 coreid_shift
= BITX(cp
->cp_eax
, 4, 0);
2105 ncpu_per_core
= BITX(cp
->cp_ebx
, 15, 0);
2106 } else if (level
== 2) {
2107 x2apic_id
= cp
->cp_edx
;
2108 chipid_shift
= BITX(cp
->cp_eax
, 4, 0);
2109 ncpu_per_chip
= BITX(cp
->cp_ebx
, 15, 0);
2113 cpi
->cpi_apicid
= x2apic_id
;
2114 cpi
->cpi_ncpu_per_chip
= ncpu_per_chip
;
2115 cpi
->cpi_ncore_per_chip
= ncpu_per_chip
/
2117 cpi
->cpi_chipid
= x2apic_id
>> chipid_shift
;
2118 cpi
->cpi_clogid
= x2apic_id
& ((1 << chipid_shift
) - 1);
2119 cpi
->cpi_coreid
= x2apic_id
>> coreid_shift
;
2120 cpi
->cpi_pkgcoreid
= cpi
->cpi_clogid
>> coreid_shift
;
2123 /* Make cp NULL so that we don't stumble on others */
	if (cpi->cpi_maxeax >= 0xD) {
		struct cpuid_regs regs;
		boolean_t cpuid_d_valid = B_TRUE;

		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);

		/*
		 * Sanity checks for debug
		 */
		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
		    (cp->cp_eax & XFEATURE_SSE) == 0) {
			cpuid_d_valid = B_FALSE;
		}

		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;

		/*
		 * If the hw supports AVX, get the size and offset in the save
		 * area for the ymm state.
		 */
		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
				cpuid_d_valid = B_FALSE;
			}

			cpi->cpi_xsave.ymm_size = cp->cp_eax;
			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
		}

		/*
		 * If the hw supports MPX, get the size and offset in the
		 * save area for BNDREGS and BNDCSR.
		 */
		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_MPX) {

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			cpi->cpi_xsave.bndregs_size = cp->cp_eax;
			cpi->cpi_xsave.bndregs_offset = cp->cp_ebx;

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			cpi->cpi_xsave.bndcsr_size = cp->cp_eax;
			cpi->cpi_xsave.bndcsr_offset = cp->cp_ebx;
		}

		/*
		 * If the hw supports AVX512, get the size and offset in the
		 * save area for the opmask registers and zmm state.
		 */
		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX512) {

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			cpi->cpi_xsave.opmask_size = cp->cp_eax;
			cpi->cpi_xsave.opmask_offset = cp->cp_ebx;

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			cpi->cpi_xsave.zmmlo_size = cp->cp_eax;
			cpi->cpi_xsave.zmmlo_offset = cp->cp_ebx;

			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			cpi->cpi_xsave.zmmhi_size = cp->cp_eax;
			cpi->cpi_xsave.zmmhi_offset = cp->cp_ebx;
		}

		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
			xsave_state_size = 0;
		} else if (cpuid_d_valid) {
			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
		} else {
			/* Broken CPUID 0xD, probably in HVM */
			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
			    ", ymm_size = %d, ymm_offset = %d\n",
			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
			    cpi->cpi_xsave.xsav_hw_features_high,
			    (int)cpi->cpi_xsave.xsav_max_size,
			    (int)cpi->cpi_xsave.ymm_size,
			    (int)cpi->cpi_xsave.ymm_offset);

			if (xsave_state_size != 0) {
				/*
				 * This must be a non-boot CPU. We cannot
				 * continue, because boot cpu has already
				 */
				ASSERT(cpu->cpu_id != 0);
				cmn_err(CE_PANIC, "cpu%d: we have already "
				    "enabled XSAVE on boot cpu, cannot "
				    "continue.", cpu->cpu_id);
			} else {
				/*
				 * If we reached here on the boot CPU, it's also
				 * almost certain that we'll reach here on the
				 * non-boot CPUs. When we're here on a boot CPU
				 * we should disable the feature, on a non-boot
				 * CPU we need to confirm that we have.
				 */
				if (cpu->cpu_id == 0) {
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX512VBMI);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX512VPOPCDQ);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX512NNIW);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX512FMAPS);

					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_XSAVE;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_AVX;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_F16C;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_FMA;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI1;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI2;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_AVX2;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_MPX;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_ALL_AVX512;

					CPI_FEATURES_7_0_ECX(cpi) &=
					    ~CPUID_INTC_ECX_7_0_ALL_AVX512;

					CPI_FEATURES_7_0_EDX(cpi) &=
					    ~CPUID_INTC_EDX_7_0_ALL_AVX512;

					xsave_force_disable = B_TRUE;
				} else {
					VERIFY(is_x86_feature(x86_featureset,
					    X86FSET_XSAVE) == B_FALSE);
				}
			}
		}
	}
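	/*
	 * The remove_x86_feature() calls above strip what are presumably the
	 * X86FSET_* equivalents of the CPUID feature words cleared just after
	 * them (XSAVE, AVX, F16C, FMA, BMI1/2, AVX2, MPX and the individual
	 * AVX-512 bits).  The xsave sizes and offsets collected in this block
	 * are also what cpuid_get_xsave_size() reports later in this file.
	 */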
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)

	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
		nmax = NMAX_CPI_EXTD;
	/*
	 * Copy the extended properties, fixing them as we go.
	 * (We already handled n == 0 and n == 1 in pass 1)
	 */
	iptr = (void *)cpi->cpi_brandstr;
	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
		cp->cp_eax = 0x80000000 + n;
		(void) __cpuid_insn(cp);
		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);

			/*
			 * Extract the brand string
			 */
			*iptr++ = cp->cp_eax;
			*iptr++ = cp->cp_ebx;
			*iptr++ = cp->cp_ecx;
			*iptr++ = cp->cp_edx;

			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * parts to report the sizes of the
				 * TLB for large pages. Before then,
				 * we don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    (cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1))
			}

			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * AMD parts with L2 TLB's.
				 * Before then, don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1)
					cp->cp_eax = cp->cp_ebx = 0;
				/*
				 * AMD Duron rev A0 reports L2
				 * cache size incorrectly as 1K
				 * when it is really 64K
				 */
				if (cpi->cpi_family == 6 &&
				    cpi->cpi_model == 3 &&
				    cpi->cpi_step == 0) {
					cp->cp_ecx &= 0xffff;
					cp->cp_ecx |= 0x400000;
				}

			case X86_VENDOR_Cyrix:	/* VIA C3 */
				/*
				 * VIA C3 processors are a bit messed
				 * up w.r.t. encoding cache sizes in %ecx
				 */
				if (cpi->cpi_family != 6)
					break;
				/*
				 * model 7 and 8 were incorrectly encoded
				 *
				 * xxx is model 8 really broken?
				 */
				if (cpi->cpi_model == 7 ||
				    cpi->cpi_model == 8)
					cp->cp_ecx =
					    BITX(cp->cp_ecx, 31, 24) << 16 |
					    BITX(cp->cp_ecx, 23, 16) << 12 |
					    BITX(cp->cp_ecx, 15, 8) << 8 |
					    BITX(cp->cp_ecx, 7, 0);
				/*
				 * model 9 stepping 1 has wrong associativity
				 */
				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
					cp->cp_ecx |= 8 << 12;

			case X86_VENDOR_Intel:
				/*
				 * Extended L2 Cache features function.
				 * First appeared on Prescott.
				 */
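/*
 * The brand-string bytes copied out of functions 0x80000002-4 above may
 * still be empty or contain junk on older parts; in that case
 * fabricate_brandstr() below falls back to the per-vendor *_cpubrand()
 * routines that follow.
 */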
static const char *
intel_cpubrand(const struct cpuid_info *cpi)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)

	switch (cpi->cpi_family) {

		return ("Intel Pentium(r)");

		switch (cpi->cpi_model) {
			uint_t celeron, xeon;
			const struct cpuid_regs *cp;

			return ("Intel Pentium(r) Pro");

			return ("Intel Pentium(r) II");

			return ("Intel Celeron(r)");

			cp = &cpi->cpi_std[2];	/* cache info */

			for (i = 1; i < 4; i++) {
				tmp = (cp->cp_eax >> (8 * i)) & 0xff;

				if (tmp >= 0x44 && tmp <= 0x45)
			}

			for (i = 0; i < 2; i++) {
				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;

				else if (tmp >= 0x44 && tmp <= 0x45)
			}

			for (i = 0; i < 4; i++) {
				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;

				else if (tmp >= 0x44 && tmp <= 0x45)
			}

			for (i = 0; i < 4; i++) {
				tmp = (cp->cp_edx >> (8 * i)) & 0xff;

				else if (tmp >= 0x44 && tmp <= 0x45)
			}

				return ("Intel Celeron(r)");

				return (cpi->cpi_model == 5 ?
				    "Intel Pentium(r) II Xeon(tm)" :
				    "Intel Pentium(r) III Xeon(tm)");

			return (cpi->cpi_model == 5 ?
			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
		}
	}

	/* BrandID is present if the field is nonzero */
	if (cpi->cpi_brandid != 0) {
		static const struct {
			uint_t bt_bid;
			const char *bt_str;
		} brand_tbl[] = {
			{ 0x1, "Intel(r) Celeron(r)" },
			{ 0x2, "Intel(r) Pentium(r) III" },
			{ 0x3, "Intel(r) Pentium(r) III Xeon(tm)" },
			{ 0x4, "Intel(r) Pentium(r) III" },
			{ 0x6, "Mobile Intel(r) Pentium(r) III" },
			{ 0x7, "Mobile Intel(r) Celeron(r)" },
			{ 0x8, "Intel(r) Pentium(r) 4" },
			{ 0x9, "Intel(r) Pentium(r) 4" },
			{ 0xa, "Intel(r) Celeron(r)" },
			{ 0xb, "Intel(r) Xeon(tm)" },
			{ 0xc, "Intel(r) Xeon(tm) MP" },
			{ 0xe, "Mobile Intel(r) Pentium(r) 4" },
			{ 0xf, "Mobile Intel(r) Celeron(r)" },
			{ 0x11, "Mobile Genuine Intel(r)" },
			{ 0x12, "Intel(r) Celeron(r) M" },
			{ 0x13, "Mobile Intel(r) Celeron(r)" },
			{ 0x14, "Intel(r) Celeron(r)" },
			{ 0x15, "Mobile Genuine Intel(r)" },
			{ 0x16, "Intel(r) Pentium(r) M" },
			{ 0x17, "Mobile Intel(r) Celeron(r)" }
		};
		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);

		sgn = (cpi->cpi_family << 8) |
		    (cpi->cpi_model << 4) | cpi->cpi_step;

		for (i = 0; i < btblmax; i++)
			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
				break;

		if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
			return ("Intel(r) Celeron(r)");
		if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
			return ("Intel(r) Xeon(tm) MP");
		if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
			return ("Intel(r) Xeon(tm)");
		return (brand_tbl[i].bt_str);
	}
}
static const char *
amd_cpubrand(const struct cpuid_info *cpi)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
		return ("i486 compatible");

	switch (cpi->cpi_family) {

		switch (cpi->cpi_model) {

			return ("AMD-K5(r)");

			return ("AMD-K6(r)");

			return ("AMD-K6(r)-2");

			return ("AMD-K6(r)-III");

			return ("AMD (family 5)");
		}

		switch (cpi->cpi_model) {

			return ("AMD-K7(tm)");

			return ("AMD Athlon(tm)");

			return ("AMD Duron(tm)");

			/*
			 * Use the L2 cache size to distinguish
			 */
			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
			    "AMD Athlon(tm)" : "AMD Duron(tm)");

			return ("AMD (family 6)");
		}
	}

	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
	    cpi->cpi_brandid != 0) {
		switch (BITX(cpi->cpi_brandid, 7, 5)) {

			return ("AMD Opteron(tm) UP 1xx");

			return ("AMD Opteron(tm) DP 2xx");

			return ("AMD Opteron(tm) MP 8xx");

			return ("AMD Opteron(tm)");
		}
	}
}
static const char *
cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
	    type == X86_TYPE_CYRIX_486)
		return ("i486 compatible");

	switch (type) {
	case X86_TYPE_CYRIX_6x86:
		return ("Cyrix 6x86");
	case X86_TYPE_CYRIX_6x86L:
		return ("Cyrix 6x86L");
	case X86_TYPE_CYRIX_6x86MX:
		return ("Cyrix 6x86MX");
	case X86_TYPE_CYRIX_GXm:
		return ("Cyrix GXm");
	case X86_TYPE_CYRIX_MediaGX:
		return ("Cyrix MediaGX");
	case X86_TYPE_CYRIX_MII:
		return ("Cyrix M2");
	case X86_TYPE_VIA_CYRIX_III:
		return ("VIA Cyrix M3");
	default:
		/*
		 * Have another wild guess ..
		 */
		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
			return ("Cyrix 5x86");
		else if (cpi->cpi_family == 5) {
			switch (cpi->cpi_model) {

				return ("Cyrix 6x86");	/* Cyrix M1 */

				return ("Cyrix MediaGX");
			}
		} else if (cpi->cpi_family == 6) {
			switch (cpi->cpi_model) {

				return ("Cyrix 6x86MX"); /* Cyrix M2? */
			}
		}
	}
}
/*
 * This only gets called in the case that the CPU extended
 * feature brand string (0x80000002, 0x80000003, 0x80000004)
 * aren't available, or contain null bytes for some reason.
 */
static void
fabricate_brandstr(struct cpuid_info *cpi)
{
	const char *brand = NULL;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		brand = intel_cpubrand(cpi);
		break;
	case X86_VENDOR_AMD:
		brand = amd_cpubrand(cpi);
		break;
	case X86_VENDOR_Cyrix:
		brand = cyrix_cpubrand(cpi, x86_type);
		break;
	case X86_VENDOR_NexGen:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "NexGen Nx586";
		break;
	case X86_VENDOR_Centaur:
		if (cpi->cpi_family == 5)
			switch (cpi->cpi_model) {

				brand = "Centaur C6";

				brand = "Centaur C2";

				brand = "Centaur C3";
			}
		break;
	case X86_VENDOR_Rise:
		if (cpi->cpi_family == 5 &&
		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))

		break;
	case X86_VENDOR_SiS:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)

		break;

		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
			brand = "Transmeta Crusoe TM3x00 or TM5x00";
		break;
	case X86_VENDOR_NSC:
	case X86_VENDOR_UMC:
		break;
	}

	if (brand) {
		(void) strcpy((char *)cpi->cpi_brandstr, brand);
		return;
	}

	/*
	 * If all else fails ...
	 */
	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
}
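/*
 * For example (illustrative only), a part with a vendor string of
 * "GenuineIntel", family 6, model 15 and stepping 11 that has no usable
 * brand data ends up with the fabricated string "GenuineIntel 6.15.11"
 * via the snprintf() fallback above.
 */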
/*
 * This routine is called just after kernel memory allocation
 * becomes available on cpu0, and as part of mp_startup() on
 * the other cpus.
 *
 * Fixup the brand string, and collect any information from cpuid
 * that requires dynamically allocated storage to represent.
 */
void
cpuid_pass3(cpu_t *cpu)
{
	int	i, max, shft, level, size;
	struct cpuid_regs regs;
	struct cpuid_regs *cp;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 2);

	/*
	 * Function 4: Deterministic cache parameters
	 *
	 * Take this opportunity to detect the number of threads
	 * sharing the last level cache, and construct a corresponding
	 * cache id. The respective cpuid_info members are initialized
	 * to the default case of "no last level cache sharing".
	 */
	cpi->cpi_ncpu_shr_last_cache = 1;
	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;

	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {

		/*
		 * Find the # of elements (size) returned by fn 4, and along
		 * the way detect last level cache sharing details.
		 */
		bzero(&regs, sizeof (regs));

		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {

			(void) __cpuid_insn(cp);

			if (CPI_CACHE_TYPE(cp) == 0)

			level = CPI_CACHE_LVL(cp);

				cpi->cpi_ncpu_shr_last_cache =
				    CPI_NTHR_SHR_CACHE(cp) + 1;
		}
		cpi->cpi_std_4_size = size = i;

		/*
		 * Allocate the cpi_std_4 array. The first element
		 * references the regs for fn 4, %ecx == 0, which
		 * cpuid_pass2() stashed in cpi->cpi_std[4].
		 */
		cpi->cpi_std_4 =
		    kmem_alloc(size * sizeof (cp), KM_SLEEP);
		cpi->cpi_std_4[0] = &cpi->cpi_std[4];

		/*
		 * Allocate storage to hold the additional regs
		 * for function 4, %ecx == 1 .. cpi_std_4_size.
		 *
		 * The regs for fn 4, %ecx == 0 has already
		 * been allocated as indicated above.
		 */
		for (i = 1; i < size; i++) {
			cp = cpi->cpi_std_4[i] =
			    kmem_zalloc(sizeof (regs), KM_SLEEP);

			(void) __cpuid_insn(cp);
		}

		/*
		 * Determine the number of bits needed to represent
		 * the number of CPUs sharing the last level cache.
		 *
		 * Shift off that number of bits from the APIC id to
		 * derive the cache id.
		 */
		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
			shft++;
		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
	}

	/*
	 * Now fixup the brand string
	 */
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
		fabricate_brandstr(cpi);
	} else {

		/*
		 * If we successfully extracted a brand string from the cpuid
		 * instruction, clean it up by removing leading spaces and
		 * brand prefix strings.
		 */
		if (cpi->cpi_brandstr[0]) {
			size_t maxlen = sizeof (cpi->cpi_brandstr);
			char *src, *dst;

			dst = src = (char *)cpi->cpi_brandstr;
			src[maxlen - 1] = '\0';
			/*
			 * strip leading spaces
			 */

			/*
			 * Remove any 'Genuine' or "Authentic" prefixes
			 */
			if (strncmp(src, "Genuine ", 8) == 0)
				src += 8;
			if (strncmp(src, "Authentic ", 10) == 0)
				src += 10;

			/*
			 * Now do an in-place copy.
			 * Map (R) to (r) and (TM) to (tm).
			 * The era of teletypes is long gone, and there's
			 * -really- no need to shout.
			 */
			while (*src != '\0') {
				if (src[0] == '(') {
					if (strncmp(src + 1, "R)", 2) == 0) {
						(void) strncpy(dst, "(r)", 3);
					}
					if (strncmp(src + 1, "TM)", 3) == 0) {
						(void) strncpy(dst, "(tm)", 4);
					}
				}
			}

			/*
			 * Finally, remove any trailing spaces
			 */
			while (--dst > cpi->cpi_brandstr)
		} else
			fabricate_brandstr(cpi);
	}
}
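/*
 * For instance, a raw brand string of "  Genuine Intel(R) CPU  " comes out
 * of the cleanup above as "Intel(r) CPU": the leading spaces and the
 * "Genuine " prefix are stripped, "(R)" is rewritten as "(r)", and trailing
 * spaces are removed.
 */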
/*
 * This routine is called out of bind_hwcap() much later in the life
 * of the kernel (post_startup()).  The job of this routine is to resolve
 * the hardware feature support and kernel support for those features into
 * what we're actually going to tell applications via the aux vector.
 */
void
cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
{
	struct cpuid_info *cpi;
	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;

	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 3);

	if (cpi->cpi_maxeax >= 1) {
		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
		uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES];

		*edx = CPI_FEATURES_EDX(cpi);
		*ecx = CPI_FEATURES_ECX(cpi);
		*ebx = CPI_FEATURES_7_0_EBX(cpi);

		/*
		 * [these require explicit kernel support]
		 */
		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
			*edx &= ~CPUID_INTC_EDX_SEP;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
			*edx &= ~CPUID_INTC_EDX_SSE2;

		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
			*edx &= ~CPUID_INTC_EDX_HTT;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
			*ecx &= ~CPUID_INTC_ECX_SSE3;

		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
			*ecx &= ~CPUID_INTC_ECX_SSSE3;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
		if (!is_x86_feature(x86_featureset, X86FSET_AES))
			*ecx &= ~CPUID_INTC_ECX_AES;
		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
			    CPUID_INTC_ECX_OSXSAVE);
		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
			*ecx &= ~CPUID_INTC_ECX_AVX;
		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
			*ecx &= ~CPUID_INTC_ECX_F16C;
		if (!is_x86_feature(x86_featureset, X86FSET_FMA))
			*ecx &= ~CPUID_INTC_ECX_FMA;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI1))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI2))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
		if (!is_x86_feature(x86_featureset, X86FSET_AVX2))
			*ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
		if (!is_x86_feature(x86_featureset, X86FSET_RDSEED))
			*ebx &= ~CPUID_INTC_EBX_7_0_RDSEED;
		if (!is_x86_feature(x86_featureset, X86FSET_ADX))
			*ebx &= ~CPUID_INTC_EBX_7_0_ADX;

		/*
		 * [no explicit support required beyond x87 fp context]
		 */
			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);

		/*
		 * Now map the supported feature vector to things that we
		 * think userland will care about.
		 */
		if (*edx & CPUID_INTC_EDX_SEP)
			hwcap_flags |= AV_386_SEP;
		if (*edx & CPUID_INTC_EDX_SSE)
			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
		if (*edx & CPUID_INTC_EDX_SSE2)
			hwcap_flags |= AV_386_SSE2;
		if (*ecx & CPUID_INTC_ECX_SSE3)
			hwcap_flags |= AV_386_SSE3;
		if (*ecx & CPUID_INTC_ECX_SSSE3)
			hwcap_flags |= AV_386_SSSE3;
		if (*ecx & CPUID_INTC_ECX_SSE4_1)
			hwcap_flags |= AV_386_SSE4_1;
		if (*ecx & CPUID_INTC_ECX_SSE4_2)
			hwcap_flags |= AV_386_SSE4_2;
		if (*ecx & CPUID_INTC_ECX_MOVBE)
			hwcap_flags |= AV_386_MOVBE;
		if (*ecx & CPUID_INTC_ECX_AES)
			hwcap_flags |= AV_386_AES;
		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
			hwcap_flags |= AV_386_PCLMULQDQ;
		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
			hwcap_flags |= AV_386_XSAVE;

			if (*ecx & CPUID_INTC_ECX_AVX) {
				uint32_t *ecx_7 = &CPI_FEATURES_7_0_ECX(cpi);
				uint32_t *edx_7 = &CPI_FEATURES_7_0_EDX(cpi);

				hwcap_flags |= AV_386_AVX;
				if (*ecx & CPUID_INTC_ECX_F16C)
					hwcap_flags_2 |= AV_386_2_F16C;
				if (*ecx & CPUID_INTC_ECX_FMA)
					hwcap_flags_2 |= AV_386_2_FMA;

				if (*ebx & CPUID_INTC_EBX_7_0_BMI1)
					hwcap_flags_2 |= AV_386_2_BMI1;
				if (*ebx & CPUID_INTC_EBX_7_0_BMI2)
					hwcap_flags_2 |= AV_386_2_BMI2;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX2)
					hwcap_flags_2 |= AV_386_2_AVX2;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512F)
					hwcap_flags_2 |= AV_386_2_AVX512F;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512DQ)
					hwcap_flags_2 |= AV_386_2_AVX512DQ;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512IFMA)
					hwcap_flags_2 |= AV_386_2_AVX512IFMA;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512PF)
					hwcap_flags_2 |= AV_386_2_AVX512PF;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512ER)
					hwcap_flags_2 |= AV_386_2_AVX512ER;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512CD)
					hwcap_flags_2 |= AV_386_2_AVX512CD;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512BW)
					hwcap_flags_2 |= AV_386_2_AVX512BW;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX512VL)
					hwcap_flags_2 |= AV_386_2_AVX512VL;

				if (*ecx_7 & CPUID_INTC_ECX_7_0_AVX512VBMI)
					hwcap_flags_2 |= AV_386_2_AVX512VBMI;
				if (*ecx_7 & CPUID_INTC_ECX_7_0_AVX512VPOPCDQ)
					hwcap_flags_2 |=
					    AV_386_2_AVX512VPOPCDQ;

				if (*edx_7 & CPUID_INTC_EDX_7_0_AVX5124NNIW)
					hwcap_flags_2 |= AV_386_2_AVX512_4NNIW;
				if (*edx_7 & CPUID_INTC_EDX_7_0_AVX5124FMAPS)
					hwcap_flags_2 |=
					    AV_386_2_AVX512_4FMAPS;
			}
		}
		if (*ecx & CPUID_INTC_ECX_VMX)
			hwcap_flags |= AV_386_VMX;
		if (*ecx & CPUID_INTC_ECX_POPCNT)
			hwcap_flags |= AV_386_POPCNT;
		if (*edx & CPUID_INTC_EDX_FPU)
			hwcap_flags |= AV_386_FPU;
		if (*edx & CPUID_INTC_EDX_MMX)
			hwcap_flags |= AV_386_MMX;

		if (*edx & CPUID_INTC_EDX_TSC)
			hwcap_flags |= AV_386_TSC;
		if (*edx & CPUID_INTC_EDX_CX8)
			hwcap_flags |= AV_386_CX8;
		if (*edx & CPUID_INTC_EDX_CMOV)
			hwcap_flags |= AV_386_CMOV;
		if (*ecx & CPUID_INTC_ECX_CX16)
			hwcap_flags |= AV_386_CX16;

		if (*ecx & CPUID_INTC_ECX_RDRAND)
			hwcap_flags_2 |= AV_386_2_RDRAND;
		if (*ebx & CPUID_INTC_EBX_7_0_ADX)
			hwcap_flags_2 |= AV_386_2_ADX;
		if (*ebx & CPUID_INTC_EBX_7_0_RDSEED)
			hwcap_flags_2 |= AV_386_2_RDSEED;
	}

	if (cpi->cpi_xmaxeax < 0x80000001)

	switch (cpi->cpi_vendor) {
		struct cpuid_regs cp;
		uint32_t *edx, *ecx;

	case X86_VENDOR_Intel:
		/*
		 * Seems like Intel duplicated what we necessary
		 * here to make the initial crop of 64-bit OS's work.
		 * Hopefully, those are the only "extended" bits
		 */

	case X86_VENDOR_AMD:
		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];

		*edx = CPI_FEATURES_XTD_EDX(cpi);
		*ecx = CPI_FEATURES_XTD_ECX(cpi);

		/*
		 * [these features require explicit kernel support]
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			break;

		case X86_VENDOR_AMD:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
				*ecx &= ~CPUID_AMD_ECX_SSE4A;
			break;
		}

		/*
		 * [no explicit support required beyond
		 * x87 fp context and exception handlers]
		 */
			*edx &= ~(CPUID_AMD_EDX_MMXamd |
			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);

		if (!is_x86_feature(x86_featureset, X86FSET_NX))
			*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
		*edx &= ~CPUID_AMD_EDX_LM;
#endif
		/*
		 * Now map the supported feature vector to
		 * things that we think userland will care about.
		 */
#if defined(__amd64)
		if (*edx & CPUID_AMD_EDX_SYSC)
			hwcap_flags |= AV_386_AMD_SYSC;
#endif
		if (*edx & CPUID_AMD_EDX_MMXamd)
			hwcap_flags |= AV_386_AMD_MMX;
		if (*edx & CPUID_AMD_EDX_3DNow)
			hwcap_flags |= AV_386_AMD_3DNow;
		if (*edx & CPUID_AMD_EDX_3DNowx)
			hwcap_flags |= AV_386_AMD_3DNowx;
		if (*ecx & CPUID_AMD_ECX_SVM)
			hwcap_flags |= AV_386_AMD_SVM;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_AMD:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			if (*ecx & CPUID_AMD_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			if (*ecx & CPUID_AMD_ECX_SSE4A)
				hwcap_flags |= AV_386_AMD_SSE4A;
			if (*ecx & CPUID_AMD_ECX_LZCNT)
				hwcap_flags |= AV_386_AMD_LZCNT;
			break;

		case X86_VENDOR_Intel:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			/*
			 * Intel uses a different bit in the same word.
			 */
			if (*ecx & CPUID_INTC_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			break;
		}
		break;

	case X86_VENDOR_TM:
		cp.cp_eax = 0x80860001;
		(void) __cpuid_insn(&cp);
		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
		break;
	}

	if (hwcap_out != NULL) {
		hwcap_out[0] = hwcap_flags;
		hwcap_out[1] = hwcap_flags_2;
	}
}
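/*
 * The two words handed back through hwcap_out are what bind_hwcap()
 * ultimately exposes to userland; presumably they end up as the
 * AT_SUN_HWCAP and AT_SUN_HWCAP2 aux vector entries, though that mapping
 * lives in the caller rather than here.
 */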
/*
 * Simulate the cpuid instruction using the data we previously
 * captured about this CPU.  We try our best to return the truth
 * about the hardware, independently of kernel support.
 */
uint32_t
cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
{
	struct cpuid_info *cpi;
	struct cpuid_regs *xcp;

	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 3));

	/*
	 * CPUID data is cached in two separate places: cpi_std for standard
	 * CPUID functions, and cpi_extd for extended CPUID functions.
	 */
	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
		xcp = &cpi->cpi_std[cp->cp_eax];
	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
	else
		/*
		 * The caller is asking for data from an input parameter which
		 * the kernel has not cached.  In this case we go fetch from
		 * the hardware and return the data directly to the user.
		 */
		return (__cpuid_insn(cp));

	cp->cp_eax = xcp->cp_eax;
	cp->cp_ebx = xcp->cp_ebx;
	cp->cp_ecx = xcp->cp_ecx;
	cp->cp_edx = xcp->cp_edx;
	return (cp->cp_eax);
}
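/*
 * A minimal usage sketch (hypothetical caller, not from this file): to read
 * the cached standard leaf 1 for a cpu one would do roughly
 *
 *	struct cpuid_regs cr = { 0 };
 *	cr.cp_eax = 1;
 *	(void) cpuid_insn(cpu, &cr);
 *
 * Anything outside the cached std/extd ranges falls straight through to
 * __cpuid_insn() on the hardware.
 */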
int
cpuid_checkpass(cpu_t *cpu, int pass)
{
	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
}

int
cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
{
	ASSERT(cpuid_checkpass(cpu, 3));

	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
}
int
cpuid_is_cmt(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));

	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
}

/*
 * AMD and Intel both implement the 64-bit variant of the syscall
 * instruction (syscallq), so if there's -any- support for syscall,
 * cpuid currently says "yes, we support this".
 *
 * However, Intel decided to -not- implement the 32-bit variant of the
 * syscall instruction, so we provide a predicate to allow our caller
 * to test that subtlety here.
 *
 * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
 *	even in the case where the hardware would in fact support it.
 */
int
cpuid_syscall32_insn(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));

	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	if (cpi->cpi_vendor == X86_VENDOR_AMD &&
	    cpi->cpi_xmaxeax >= 0x80000001 &&
	    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
		return (1);

	return (0);
}

int
cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	static const char fmt[] =
	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
	static const char fmt_ht[] =
	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";

	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
		    cpi->cpi_family, cpi->cpi_model,
		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
	return (snprintf(s, n, fmt,
	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
	    cpi->cpi_family, cpi->cpi_model,
	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
}
cpuid_getvendorstr(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
}

cpuid_getvendor(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
}

cpuid_getfamily(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_family);
}

cpuid_getmodel(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_model);
}

cpuid_get_ncpu_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
}

cpuid_get_ncore_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
}

cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
}

cpuid_get_last_lvl_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

cpuid_getstep(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_step);
}

cpuid_getsig(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
}

cpuid_getchiprev(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
}

cpuid_getchiprevstr(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
}

cpuid_getsockettype(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
}

cpuid_getsocketstr(cpu_t *cpu)
{
	static const char *socketstr = NULL;
	struct cpuid_info *cpi;

	ASSERT(cpuid_checkpass(cpu, 1));
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Assume that socket types are the same across the system */
	if (socketstr == NULL)
		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
		    cpi->cpi_model, cpi->cpi_step);

	return (socketstr);
}

cpuid_get_chipid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
	return (cpu->cpu_id);
}

cpuid_get_coreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
}

cpuid_get_pkgcoreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
}

cpuid_get_clogid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
}

cpuid_get_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

cpuid_get_procnodeid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
}

cpuid_get_procnodes_per_pkg(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
}

cpuid_get_compunitid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
}

cpuid_get_cores_per_compunit(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
}
cpuid_have_cr8access(cpu_t *cpu)
{
#if defined(__amd64)
	struct cpuid_info *cpi;

	ASSERT(cpu != NULL);
	cpi = cpu->cpu_m.mcpu_cpi;
	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
		return (1);
#endif
	return (0);
}

cpuid_get_apicid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
		return (UINT32_MAX);
	}

	return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
}
cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
{
	struct cpuid_info *cpi;

	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	if (pabits)
		*pabits = cpi->cpi_pabits;
	if (vabits)
		*vabits = cpi->cpi_vabits;
}

cpuid_get_xsave_size()
{
	return (MAX(cpuid_info0.cpi_xsave.xsav_max_size,
	    sizeof (struct xsave_state)));
}
/*
 * Return true if the CPUs on this system require 'pointer clearing' for the
 * floating point error pointer exception handling. In the past, this has been
 * true for all AMD K7 & K8 CPUs, although newer AMD CPUs have been changed to
 * behave the same as Intel. This is checked via the CPUID_AMD_EBX_ERR_PTR_ZERO
 * feature bit and is reflected in the cpi_fp_amd_save member. Once this has
 * been confirmed on hardware which supports that feature, this test should be
 * narrowed. In the meantime, we always follow the existing behavior on any AMD
 * CPU.
 */
cpuid_need_fp_excp_handling()
{
	return (cpuid_info0.cpi_vendor == X86_VENDOR_AMD);
}
/*
 * Returns the number of data TLB entries for a corresponding
 * pagesize.  If it can't be computed, or isn't known, the
 * routine returns zero.  If you ask about an architecturally
 * impossible pagesize, the routine will panic (so that the
 * hat implementor knows that things are inconsistent.)
 */
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	/*
	 * Check the L2 TLB info
	 */
	if (cpi->cpi_xmaxeax >= 0x80000006) {
		struct cpuid_regs *cp = &cpi->cpi_extd[6];

		switch (pagesize) {

			/*
			 * All zero in the top 16 bits of the register
			 * indicates a unified TLB. Size is in low 16 bits.
			 */
			if ((cp->cp_ebx & 0xffff0000) == 0)
				dtlb_nent = cp->cp_ebx & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
			break;

		case 2 * 1024 * 1024:
			if ((cp->cp_eax & 0xffff0000) == 0)
				dtlb_nent = cp->cp_eax & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_eax, 27, 16);
			break;

		default:
			panic("unknown L2 pagesize");
		}
	}

	/*
	 * No L2 TLB support for this size, try L1.
	 */
	if (cpi->cpi_xmaxeax >= 0x80000005) {
		struct cpuid_regs *cp = &cpi->cpi_extd[5];

		switch (pagesize) {

			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
			break;

		case 2 * 1024 * 1024:
			dtlb_nent = BITX(cp->cp_eax, 23, 16);
			break;

		default:
			panic("unknown L1 d-TLB pagesize");
		}
	}
}
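/*
 * Worked example (hypothetical register value): if extended function 6
 * reported %ebx == 0x00400040, the top 16 bits are non-zero, so the TLB is
 * split and dtlb_nent = BITX(%ebx, 27, 16) = 0x40, i.e. 64 data-TLB entries.
 */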
3693 * Return 0 if the erratum is not present or not applicable, positive
3694 * if it is, and negative if the status of the erratum is unknown.
3696 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3697 * Processors" #25759, Rev 3.57, August 2005
3700 cpuid_opteron_erratum(cpu_t
*cpu
, uint_t erratum
)
3702 struct cpuid_info
*cpi
= cpu
->cpu_m
.mcpu_cpi
;
3706 * Bail out if this CPU isn't an AMD CPU, or if it's
3707 * a legacy (32-bit) AMD CPU.
3709 if (cpi
->cpi_vendor
!= X86_VENDOR_AMD
||
3710 cpi
->cpi_family
== 4 || cpi
->cpi_family
== 5 ||
3711 cpi
->cpi_family
== 6)
3715 eax
= cpi
->cpi_std
[1].cp_eax
;
3717 #define SH_B0(eax) (eax == 0xf40 || eax == 0xf50)
3718 #define SH_B3(eax) (eax == 0xf51)
3719 #define B(eax) (SH_B0(eax) || SH_B3(eax))
3721 #define SH_C0(eax) (eax == 0xf48 || eax == 0xf58)
3723 #define SH_CG(eax) (eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3724 #define DH_CG(eax) (eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3725 #define CH_CG(eax) (eax == 0xf82 || eax == 0xfb2)
3726 #define CG(eax) (SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3728 #define SH_D0(eax) (eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3729 #define DH_D0(eax) (eax == 0x10fc0 || eax == 0x10ff0)
3730 #define CH_D0(eax) (eax == 0x10f80 || eax == 0x10fb0)
3731 #define D0(eax) (SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3733 #define SH_E0(eax) (eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3734 #define JH_E1(eax) (eax == 0x20f10) /* JH8_E0 had 0x20f30 */
3735 #define DH_E3(eax) (eax == 0x20fc0 || eax == 0x20ff0)
3736 #define SH_E4(eax) (eax == 0x20f51 || eax == 0x20f71)
3737 #define BH_E4(eax) (eax == 0x20fb1)
3738 #define SH_E5(eax) (eax == 0x20f42)
3739 #define DH_E6(eax) (eax == 0x20ff2 || eax == 0x20fc2)
3740 #define JH_E6(eax) (eax == 0x20f12 || eax == 0x20f32)
3741 #define EX(eax) (SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3742 SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3743 DH_E6(eax) || JH_E6(eax))
3745 #define DR_AX(eax) (eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3746 #define DR_B0(eax) (eax == 0x100f20)
3747 #define DR_B1(eax) (eax == 0x100f21)
3748 #define DR_BA(eax) (eax == 0x100f2a)
3749 #define DR_B2(eax) (eax == 0x100f22)
3750 #define DR_B3(eax) (eax == 0x100f23)
3751 #define RB_C0(eax) (eax == 0x100f40)
3755 return (cpi
->cpi_family
< 0x10);
3756 case 51: /* what does the asterisk mean? */
3757 return (B(eax
) || SH_C0(eax
) || CG(eax
));
3761 return (cpi
->cpi_family
<= 0x11);
3765 return (cpi
->cpi_family
<= 0x11);
3778 return (SH_B0(eax
));
3782 return (cpi
->cpi_family
< 0x10);
3786 return (cpi
->cpi_family
<= 0x11);
3788 return (B(eax
) || SH_C0(eax
));
3790 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
) || EX(eax
));
3796 return (B(eax
) || SH_C0(eax
) || CG(eax
));
3798 return (cpi
->cpi_family
< 0x10);
3800 return (SH_C0(eax
) || CG(eax
));
3802 #if !defined(__amd64)
3805 return (B(eax
) || SH_C0(eax
));
3808 return (cpi
->cpi_family
< 0x10);
3810 return (B(eax
) || SH_C0(eax
) || CG(eax
));
3813 return (B(eax
) || SH_C0(eax
));
3815 return (SH_C0(eax
));
3817 return (B(eax
) || SH_C0(eax
) || CG(eax
));
3819 #if !defined(__amd64)
3822 return (B(eax
) || SH_C0(eax
));
3825 return (B(eax
) || SH_C0(eax
) || CG(eax
));
3828 return (SH_C0(eax
) || CG(eax
));
3830 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
));
3832 return (B(eax
) || SH_C0(eax
));
3835 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
));
3837 return (SH_C0(eax
) || CG(eax
) || D0(eax
));
3841 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
));
3843 return (DH_CG(eax
));
3845 return (SH_C0(eax
) || CG(eax
) || D0(eax
));
3847 return (D0(eax
) || EX(eax
));
3851 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
) || EX(eax
));
3853 return (eax
== 0x20fc0);
3855 return (SH_E0(eax
) || JH_E1(eax
) || DH_E3(eax
));
3857 return (SH_E0(eax
) || JH_E1(eax
));
3859 return (SH_E0(eax
) || JH_E1(eax
) || DH_E3(eax
));
3861 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
));
3863 return (SH_E0(eax
) || JH_E1(eax
) || SH_E4(eax
) || BH_E4(eax
) ||
3866 return (B(eax
) || SH_C0(eax
) || CG(eax
) || D0(eax
) || EX(eax
));
3868 return (cpi
->cpi_family
< 0x10 || cpi
->cpi_family
== 0x11);
3870 return (JH_E1(eax
) || BH_E4(eax
) || JH_E6(eax
));
3872 return (cpi
->cpi_family
< 0x10);
3875 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3876 * if this is a K8 family or newer processor
3878 if (CPI_FAMILY(cpi
) == 0xf) {
3879 struct cpuid_regs regs
;
3880 regs
.cp_eax
= 0x80000007;
3881 (void) __cpuid_insn(®s
);
3882 return (!(regs
.cp_edx
& 0x100));
3886 return (((((eax
>> 12) & 0xff00) + (eax
& 0xf00)) |
3887 (((eax
>> 4) & 0xf) | ((eax
>> 12) & 0xf0))) < 0xf40);
3891 * check for processors (pre-Shanghai) that do not provide
3892 * optimal management of 1gb ptes in its tlb.
3894 return (cpi
->cpi_family
== 0x10 && cpi
->cpi_model
< 4);
3897 return (DR_AX(eax
) || DR_B0(eax
) || DR_B1(eax
) || DR_BA(eax
) ||
3898 DR_B2(eax
) || RB_C0(eax
));
3901 #if defined(__amd64)
3902 return (cpi
->cpi_family
== 0x10 || cpi
->cpi_family
== 0x12);
/*
 * Determine if specified erratum is present via OSVW (OS Visible Workaround).
 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
 */
osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi;

	static int osvwfeature = -1;
	uint64_t osvwlength;

	cpi = cpu->cpu_m.mcpu_cpi;

	/* confirm OSVW supported */
	if (osvwfeature == -1) {
		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
	} else {
		/* assert that osvw feature setting is consistent on all cpus */
		ASSERT(osvwfeature ==
		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
	}

	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;

	switch (erratum) {
	case 298:	/* osvwid is 0 */
		if (osvwlength <= (uint64_t)osvwid) {
			/* osvwid 0 is unknown */
			return (-1);
		}

		/*
		 * Check the OSVW STATUS MSR to determine the state
		 * of the erratum where:
		 *   1 - BIOS has applied the workaround when BIOS
		 *   workaround is available. (Or for other errata,
		 *   OS workaround is required.)
		 * For a value of 1, caller will confirm that the
		 * erratum 298 workaround has indeed been applied by BIOS.
		 *
		 * A 1 may be set in cpus that have a HW fix
		 * in a mixed cpu system. Regarding erratum 298:
		 *   In a multiprocessor platform, the workaround above
		 *   should be applied to all processors regardless of
		 *   silicon revision when an affected processor is
		 *   present.
		 */
		return (rdmsr(MSR_AMD_OSVW_STATUS +
		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
	}
}
static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";

add_cache_prop(dev_info_t *devi, const char *label, const char *type,
    int val)
{
	/*
	 * ndi_prop_update_int() is used because it is desirable for
	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
	 */
	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
}
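/*
 * For example, add_cache_prop(devi, "l2-cache", size_str, 512*1024) ends up
 * creating an integer "l2-cache-size" property on the cpu devinfo node
 * (the values here are purely illustrative).
 */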
3995 * Intel-style cache/tlb description
3997 * Standard cpuid level 2 gives a randomly ordered
3998 * selection of tags that index into a table that describes
3999 * cache and tlb properties.
4002 static const char l1_icache_str
[] = "l1-icache";
4003 static const char l1_dcache_str
[] = "l1-dcache";
4004 static const char l2_cache_str
[] = "l2-cache";
4005 static const char l3_cache_str
[] = "l3-cache";
4006 static const char itlb4k_str
[] = "itlb-4K";
4007 static const char dtlb4k_str
[] = "dtlb-4K";
4008 static const char itlb2M_str
[] = "itlb-2M";
4009 static const char itlb4M_str
[] = "itlb-4M";
4010 static const char dtlb4M_str
[] = "dtlb-4M";
4011 static const char dtlb24_str
[] = "dtlb0-2M-4M";
4012 static const char itlb424_str
[] = "itlb-4K-2M-4M";
4013 static const char itlb24_str
[] = "itlb-2M-4M";
4014 static const char dtlb44_str
[] = "dtlb-4K-4M";
4015 static const char sl1_dcache_str
[] = "sectored-l1-dcache";
4016 static const char sl2_cache_str
[] = "sectored-l2-cache";
4017 static const char itrace_str
[] = "itrace-cache";
4018 static const char sl3_cache_str
[] = "sectored-l3-cache";
4019 static const char sh_l2_tlb4k_str
[] = "shared-l2-tlb-4k";
4021 static const struct cachetab
{
4024 uint16_t ct_line_size
;
4026 const char *ct_label
;
4029 * maintain descending order!
4031 * Codes ignored - Reason
4032 * ----------------------
4033 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
4034 * f0H/f1H - Currently we do not interpret prefetch size by design
4036 { 0xe4, 16, 64, 8*1024*1024, l3_cache_str
},
4037 { 0xe3, 16, 64, 4*1024*1024, l3_cache_str
},
4038 { 0xe2, 16, 64, 2*1024*1024, l3_cache_str
},
4039 { 0xde, 12, 64, 6*1024*1024, l3_cache_str
},
4040 { 0xdd, 12, 64, 3*1024*1024, l3_cache_str
},
4041 { 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str
},
4042 { 0xd8, 8, 64, 4*1024*1024, l3_cache_str
},
4043 { 0xd7, 8, 64, 2*1024*1024, l3_cache_str
},
4044 { 0xd6, 8, 64, 1*1024*1024, l3_cache_str
},
4045 { 0xd2, 4, 64, 2*1024*1024, l3_cache_str
},
4046 { 0xd1, 4, 64, 1*1024*1024, l3_cache_str
},
4047 { 0xd0, 4, 64, 512*1024, l3_cache_str
},
4048 { 0xca, 4, 0, 512, sh_l2_tlb4k_str
},
4049 { 0xc0, 4, 0, 8, dtlb44_str
},
4050 { 0xba, 4, 0, 64, dtlb4k_str
},
4051 { 0xb4, 4, 0, 256, dtlb4k_str
},
4052 { 0xb3, 4, 0, 128, dtlb4k_str
},
4053 { 0xb2, 4, 0, 64, itlb4k_str
},
4054 { 0xb0, 4, 0, 128, itlb4k_str
},
4055 { 0x87, 8, 64, 1024*1024, l2_cache_str
},
4056 { 0x86, 4, 64, 512*1024, l2_cache_str
},
4057 { 0x85, 8, 32, 2*1024*1024, l2_cache_str
},
4058 { 0x84, 8, 32, 1024*1024, l2_cache_str
},
4059 { 0x83, 8, 32, 512*1024, l2_cache_str
},
4060 { 0x82, 8, 32, 256*1024, l2_cache_str
},
4061 { 0x80, 8, 64, 512*1024, l2_cache_str
},
4062 { 0x7f, 2, 64, 512*1024, l2_cache_str
},
4063 { 0x7d, 8, 64, 2*1024*1024, sl2_cache_str
},
4064 { 0x7c, 8, 64, 1024*1024, sl2_cache_str
},
4065 { 0x7b, 8, 64, 512*1024, sl2_cache_str
},
4066 { 0x7a, 8, 64, 256*1024, sl2_cache_str
},
4067 { 0x79, 8, 64, 128*1024, sl2_cache_str
},
4068 { 0x78, 8, 64, 1024*1024, l2_cache_str
},
4069 { 0x73, 8, 0, 64*1024, itrace_str
},
4070 { 0x72, 8, 0, 32*1024, itrace_str
},
4071 { 0x71, 8, 0, 16*1024, itrace_str
},
4072 { 0x70, 8, 0, 12*1024, itrace_str
},
4073 { 0x68, 4, 64, 32*1024, sl1_dcache_str
},
4074 { 0x67, 4, 64, 16*1024, sl1_dcache_str
},
4075 { 0x66, 4, 64, 8*1024, sl1_dcache_str
},
4076 { 0x60, 8, 64, 16*1024, sl1_dcache_str
},
4077 { 0x5d, 0, 0, 256, dtlb44_str
},
4078 { 0x5c, 0, 0, 128, dtlb44_str
},
4079 { 0x5b, 0, 0, 64, dtlb44_str
},
4080 { 0x5a, 4, 0, 32, dtlb24_str
},
4081 { 0x59, 0, 0, 16, dtlb4k_str
},
4082 { 0x57, 4, 0, 16, dtlb4k_str
},
4083 { 0x56, 4, 0, 16, dtlb4M_str
},
4084 { 0x55, 0, 0, 7, itlb24_str
},
4085 { 0x52, 0, 0, 256, itlb424_str
},
4086 { 0x51, 0, 0, 128, itlb424_str
},
4087 { 0x50, 0, 0, 64, itlb424_str
},
4088 { 0x4f, 0, 0, 32, itlb4k_str
},
4089 { 0x4e, 24, 64, 6*1024*1024, l2_cache_str
},
4090 { 0x4d, 16, 64, 16*1024*1024, l3_cache_str
},
4091 { 0x4c, 12, 64, 12*1024*1024, l3_cache_str
},
4092 { 0x4b, 16, 64, 8*1024*1024, l3_cache_str
},
4093 { 0x4a, 12, 64, 6*1024*1024, l3_cache_str
},
4094 { 0x49, 16, 64, 4*1024*1024, l3_cache_str
},
4095 { 0x48, 12, 64, 3*1024*1024, l2_cache_str
},
4096 { 0x47, 8, 64, 8*1024*1024, l3_cache_str
},
4097 { 0x46, 4, 64, 4*1024*1024, l3_cache_str
},
4098 { 0x45, 4, 32, 2*1024*1024, l2_cache_str
},
4099 { 0x44, 4, 32, 1024*1024, l2_cache_str
},
4100 { 0x43, 4, 32, 512*1024, l2_cache_str
},
4101 { 0x42, 4, 32, 256*1024, l2_cache_str
},
4102 { 0x41, 4, 32, 128*1024, l2_cache_str
},
4103 { 0x3e, 4, 64, 512*1024, sl2_cache_str
},
4104 { 0x3d, 6, 64, 384*1024, sl2_cache_str
},
4105 { 0x3c, 4, 64, 256*1024, sl2_cache_str
},
4106 { 0x3b, 2, 64, 128*1024, sl2_cache_str
},
4107 { 0x3a, 6, 64, 192*1024, sl2_cache_str
},
4108 { 0x39, 4, 64, 128*1024, sl2_cache_str
},
4109 { 0x30, 8, 64, 32*1024, l1_icache_str
},
4110 { 0x2c, 8, 64, 32*1024, l1_dcache_str
},
4111 { 0x29, 8, 64, 4096*1024, sl3_cache_str
},
4112 { 0x25, 8, 64, 2048*1024, sl3_cache_str
},
4113 { 0x23, 8, 64, 1024*1024, sl3_cache_str
},
4114 { 0x22, 4, 64, 512*1024, sl3_cache_str
},
4115 { 0x0e, 6, 64, 24*1024, l1_dcache_str
},
4116 { 0x0d, 4, 32, 16*1024, l1_dcache_str
},
4117 { 0x0c, 4, 32, 16*1024, l1_dcache_str
},
4118 { 0x0b, 4, 0, 4, itlb4M_str
},
4119 { 0x0a, 2, 32, 8*1024, l1_dcache_str
},
4120 { 0x08, 4, 32, 16*1024, l1_icache_str
},
4121 { 0x06, 4, 32, 8*1024, l1_icache_str
},
4122 { 0x05, 4, 0, 32, dtlb4M_str
},
4123 { 0x04, 4, 0, 8, dtlb4M_str
},
4124 { 0x03, 4, 0, 64, dtlb4k_str
},
4125 { 0x02, 4, 0, 2, itlb4M_str
},
4126 { 0x01, 4, 0, 32, itlb4k_str
},
static const struct cachetab cyrix_ctab[] = {
	{ 0x70, 4, 0, 32, "tlb-4K" },
	{ 0x80, 4, 16, 16*1024, "l1-cache" },
};

/*
 * Search a cache table for a matching entry
 */
static const struct cachetab *
find_cacheent(const struct cachetab *ct, uint_t code)
{
	for (; ct->ct_code != 0; ct++)
		if (ct->ct_code <= code)
			break;
	if (ct->ct_code == code)
		return (ct);
	return (NULL);
}
4153 * Populate cachetab entry with L2 or L3 cache-information using
4154 * cpuid function 4. This function is called from intel_walk_cacheinfo()
4155 * when descriptor 0x49 is encountered. It returns 0 if no such cache
4156 * information is found.
4159 intel_cpuid_4_cache_info(struct cachetab
*ct
, struct cpuid_info
*cpi
)
4164 for (i
= 0; i
< cpi
->cpi_std_4_size
; i
++) {
4165 level
= CPI_CACHE_LVL(cpi
->cpi_std_4
[i
]);
4167 if (level
== 2 || level
== 3) {
4168 ct
->ct_assoc
= CPI_CACHE_WAYS(cpi
->cpi_std_4
[i
]) + 1;
4170 CPI_CACHE_COH_LN_SZ(cpi
->cpi_std_4
[i
]) + 1;
4171 ct
->ct_size
= ct
->ct_assoc
*
4172 (CPI_CACHE_PARTS(cpi
->cpi_std_4
[i
]) + 1) *
4174 (cpi
->cpi_std_4
[i
]->cp_ecx
+ 1);
4177 ct
->ct_label
= l2_cache_str
;
4178 } else if (level
== 3) {
4179 ct
->ct_label
= l3_cache_str
;
4189 * Walk the cacheinfo descriptor, applying 'func' to every valid element
4190 * The walk is terminated if the walker returns non-zero.
4193 intel_walk_cacheinfo(struct cpuid_info
*cpi
,
4194 void *arg
, int (*func
)(void *, const struct cachetab
*))
4196 const struct cachetab
*ct
;
4197 struct cachetab des_49_ct
, des_b1_ct
;
4201 if ((dp
= cpi
->cpi_cacheinfo
) == NULL
)
4203 for (i
= 0; i
< cpi
->cpi_ncache
; i
++, dp
++) {
4205 * For overloaded descriptor 0x49 we use cpuid function 4
4206 * if supported by the current processor, to create
4207 * cache information.
4208 * For overloaded descriptor 0xb1 we use X86_PAE flag
4209 * to disambiguate the cache information.
4211 if (*dp
== 0x49 && cpi
->cpi_maxeax
>= 0x4 &&
4212 intel_cpuid_4_cache_info(&des_49_ct
, cpi
) == 1) {
4214 } else if (*dp
== 0xb1) {
4215 des_b1_ct
.ct_code
= 0xb1;
4216 des_b1_ct
.ct_assoc
= 4;
4217 des_b1_ct
.ct_line_size
= 0;
4218 if (is_x86_feature(x86_featureset
, X86FSET_PAE
)) {
4219 des_b1_ct
.ct_size
= 8;
4220 des_b1_ct
.ct_label
= itlb2M_str
;
4222 des_b1_ct
.ct_size
= 4;
4223 des_b1_ct
.ct_label
= itlb4M_str
;
4227 if ((ct
= find_cacheent(intel_ctab
, *dp
)) == NULL
) {
4232 if (func(arg
, ct
) != 0) {
4239 * (Like the Intel one, except for Cyrix CPUs)
4242 cyrix_walk_cacheinfo(struct cpuid_info
*cpi
,
4243 void *arg
, int (*func
)(void *, const struct cachetab
*))
4245 const struct cachetab
*ct
;
4249 if ((dp
= cpi
->cpi_cacheinfo
) == NULL
)
4251 for (i
= 0; i
< cpi
->cpi_ncache
; i
++, dp
++) {
4253 * Search Cyrix-specific descriptor table first ..
4255 if ((ct
= find_cacheent(cyrix_ctab
, *dp
)) != NULL
) {
4256 if (func(arg
, ct
) != 0)
4261 * .. else fall back to the Intel one
4263 if ((ct
= find_cacheent(intel_ctab
, *dp
)) != NULL
) {
4264 if (func(arg
, ct
) != 0)
4272 * A cacheinfo walker that adds associativity, line-size, and size properties
4273 * to the devinfo node it is passed as an argument.
4276 add_cacheent_props(void *arg
, const struct cachetab
*ct
)
4278 dev_info_t
*devi
= arg
;
4280 add_cache_prop(devi
, ct
->ct_label
, assoc_str
, ct
->ct_assoc
);
4281 if (ct
->ct_line_size
!= 0)
4282 add_cache_prop(devi
, ct
->ct_label
, line_str
,
4284 add_cache_prop(devi
, ct
->ct_label
, size_str
, ct
->ct_size
);
4289 static const char fully_assoc
[] = "fully-associative?";
4292 * AMD style cache/tlb description
4294 * Extended functions 5 and 6 directly describe properties of
4295 * tlbs and various cache levels.
4298 add_amd_assoc(dev_info_t
*devi
, const char *label
, uint_t assoc
)
4301 case 0: /* reserved; ignore */
4304 add_cache_prop(devi
, label
, assoc_str
, assoc
);
4307 add_cache_prop(devi
, label
, fully_assoc
, 1);
4313 add_amd_tlb(dev_info_t
*devi
, const char *label
, uint_t assoc
, uint_t size
)
4317 add_cache_prop(devi
, label
, size_str
, size
);
4318 add_amd_assoc(devi
, label
, assoc
);
4322 add_amd_cache(dev_info_t
*devi
, const char *label
,
4323 uint_t size
, uint_t assoc
, uint_t lines_per_tag
, uint_t line_size
)
4325 if (size
== 0 || line_size
== 0)
4327 add_amd_assoc(devi
, label
, assoc
);
4329 * Most AMD parts have a sectored cache. Multiple cache lines are
4330 * associated with each tag. A sector consists of all cache lines
4331 * associated with a tag. For example, the AMD K6-III has a sector
4332 * size of 2 cache lines per tag.
4334 if (lines_per_tag
!= 0)
4335 add_cache_prop(devi
, label
, "lines-per-tag", lines_per_tag
);
4336 add_cache_prop(devi
, label
, line_str
, line_size
);
4337 add_cache_prop(devi
, label
, size_str
, size
* 1024);
4341 add_amd_l2_assoc(dev_info_t
*devi
, const char *label
, uint_t assoc
)
4349 add_cache_prop(devi
, label
, assoc_str
, assoc
);
4352 add_cache_prop(devi
, label
, assoc_str
, 8);
4355 add_cache_prop(devi
, label
, assoc_str
, 16);
4358 add_cache_prop(devi
, label
, fully_assoc
, 1);
4360 default: /* reserved; ignore */
4366 add_amd_l2_tlb(dev_info_t
*devi
, const char *label
, uint_t assoc
, uint_t size
)
4368 if (size
== 0 || assoc
== 0)
4370 add_amd_l2_assoc(devi
, label
, assoc
);
4371 add_cache_prop(devi
, label
, size_str
, size
);
4375 add_amd_l2_cache(dev_info_t
*devi
, const char *label
,
4376 uint_t size
, uint_t assoc
, uint_t lines_per_tag
, uint_t line_size
)
4378 if (size
== 0 || assoc
== 0 || line_size
== 0)
4380 add_amd_l2_assoc(devi
, label
, assoc
);
4381 if (lines_per_tag
!= 0)
4382 add_cache_prop(devi
, label
, "lines-per-tag", lines_per_tag
);
4383 add_cache_prop(devi
, label
, line_str
, line_size
);
4384 add_cache_prop(devi
, label
, size_str
, size
* 1024);
4388 amd_cache_info(struct cpuid_info
*cpi
, dev_info_t
*devi
)
4390 struct cpuid_regs
*cp
;
4392 if (cpi
->cpi_xmaxeax
< 0x80000005)
4394 cp
= &cpi
->cpi_extd
[5];
4397 * 4M/2M L1 TLB configuration
4399 * We report the size for 2M pages because AMD uses two
4400 * TLB entries for one 4M page.
4402 add_amd_tlb(devi
, "dtlb-2M",
4403 BITX(cp
->cp_eax
, 31, 24), BITX(cp
->cp_eax
, 23, 16));
4404 add_amd_tlb(devi
, "itlb-2M",
4405 BITX(cp
->cp_eax
, 15, 8), BITX(cp
->cp_eax
, 7, 0));
4408 * 4K L1 TLB configuration
4411 switch (cpi
->cpi_vendor
) {
4414 if (cpi
->cpi_family
>= 5) {
4416 * Crusoe processors have 256 TLB entries, but
4417 * cpuid data format constrains them to only
4418 * reporting 255 of them.
4420 if ((nentries
= BITX(cp
->cp_ebx
, 23, 16)) == 255)
4423 * Crusoe processors also have a unified TLB
4425 add_amd_tlb(devi
, "tlb-4K", BITX(cp
->cp_ebx
, 31, 24),
4431 add_amd_tlb(devi
, itlb4k_str
,
4432 BITX(cp
->cp_ebx
, 31, 24), BITX(cp
->cp_ebx
, 23, 16));
4433 add_amd_tlb(devi
, dtlb4k_str
,
4434 BITX(cp
->cp_ebx
, 15, 8), BITX(cp
->cp_ebx
, 7, 0));
4439 * data L1 cache configuration
4442 add_amd_cache(devi
, l1_dcache_str
,
4443 BITX(cp
->cp_ecx
, 31, 24), BITX(cp
->cp_ecx
, 23, 16),
4444 BITX(cp
->cp_ecx
, 15, 8), BITX(cp
->cp_ecx
, 7, 0));
4447 * code L1 cache configuration
4450 add_amd_cache(devi
, l1_icache_str
,
4451 BITX(cp
->cp_edx
, 31, 24), BITX(cp
->cp_edx
, 23, 16),
4452 BITX(cp
->cp_edx
, 15, 8), BITX(cp
->cp_edx
, 7, 0));
4454 if (cpi
->cpi_xmaxeax
< 0x80000006)
4456 cp
= &cpi
->cpi_extd
[6];
4458 /* Check for a unified L2 TLB for large pages */
4460 if (BITX(cp
->cp_eax
, 31, 16) == 0)
4461 add_amd_l2_tlb(devi
, "l2-tlb-2M",
4462 BITX(cp
->cp_eax
, 15, 12), BITX(cp
->cp_eax
, 11, 0));
4464 add_amd_l2_tlb(devi
, "l2-dtlb-2M",
4465 BITX(cp
->cp_eax
, 31, 28), BITX(cp
->cp_eax
, 27, 16));
4466 add_amd_l2_tlb(devi
, "l2-itlb-2M",
4467 BITX(cp
->cp_eax
, 15, 12), BITX(cp
->cp_eax
, 11, 0));
4470 /* Check for a unified L2 TLB for 4K pages */
4472 if (BITX(cp
->cp_ebx
, 31, 16) == 0) {
4473 add_amd_l2_tlb(devi
, "l2-tlb-4K",
4474 BITX(cp
->cp_eax
, 15, 12), BITX(cp
->cp_eax
, 11, 0));
4476 add_amd_l2_tlb(devi
, "l2-dtlb-4K",
4477 BITX(cp
->cp_eax
, 31, 28), BITX(cp
->cp_eax
, 27, 16));
4478 add_amd_l2_tlb(devi
, "l2-itlb-4K",
4479 BITX(cp
->cp_eax
, 15, 12), BITX(cp
->cp_eax
, 11, 0));
4482 add_amd_l2_cache(devi
, l2_cache_str
,
4483 BITX(cp
->cp_ecx
, 31, 16), BITX(cp
->cp_ecx
, 15, 12),
4484 BITX(cp
->cp_ecx
, 11, 8), BITX(cp
->cp_ecx
, 7, 0));
4488 * There are two basic ways that the x86 world describes it cache
4489 * and tlb architecture - Intel's way and AMD's way.
4491 * Return which flavor of cache architecture we should use
4494 x86_which_cacheinfo(struct cpuid_info
*cpi
)
4496 switch (cpi
->cpi_vendor
) {
4497 case X86_VENDOR_Intel
:
4498 if (cpi
->cpi_maxeax
>= 2)
4499 return (X86_VENDOR_Intel
);
4501 case X86_VENDOR_AMD
:
4503 * The K5 model 1 was the first part from AMD that reported
4504 * cache sizes via extended cpuid functions.
4506 if (cpi
->cpi_family
> 5 ||
4507 (cpi
->cpi_family
== 5 && cpi
->cpi_model
>= 1))
4508 return (X86_VENDOR_AMD
);
4511 if (cpi
->cpi_family
>= 5)
4512 return (X86_VENDOR_AMD
);
4516 * If they have extended CPU data for 0x80000005
4517 * then we assume they have AMD-format cache
4520 * If not, and the vendor happens to be Cyrix,
4521 * then try our-Cyrix specific handler.
4523 * If we're not Cyrix, then assume we're using Intel's
4524 * table-driven format instead.
4526 if (cpi
->cpi_xmaxeax
>= 0x80000005)
4527 return (X86_VENDOR_AMD
);
4528 else if (cpi
->cpi_vendor
== X86_VENDOR_Cyrix
)
4529 return (X86_VENDOR_Cyrix
);
4530 else if (cpi
->cpi_maxeax
>= 2)
4531 return (X86_VENDOR_Intel
);
void
cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
    struct cpuid_info *cpi)
{
	dev_info_t *cpu_devi;
	int create;

	cpu_devi = (dev_info_t *)dip;

	/* device_type */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "device_type", "cpu");

	/* reg */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "reg", cpu_id);

	/* cpu-mhz, and clock-frequency */
	if (cpu_freq > 0) {
		long long mul;

		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpu-mhz", cpu_freq);
		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clock-frequency", (int)mul);
	}

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
		return;
	}

	/* vendor-id */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "vendor-id", cpi->cpi_vendorstr);

	if (cpi->cpi_maxeax == 0) {
		return;
	}

	/*
	 * family, model, and step
	 */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "family", CPI_FAMILY(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpu-model", CPI_MODEL(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "stepping-id", CPI_STEP(cpi));

	/* type */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = 1;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "type", CPI_TYPE(cpi));

	/* ext-family */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-family", CPI_FAMILY_XTD(cpi));

	/* ext-model */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_EXTENDED_MODEL_INTEL(cpi);
		break;
	case X86_VENDOR_AMD:
		create = CPI_FAMILY(cpi) == 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-model", CPI_MODEL_XTD(cpi));

	/* generation */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD K5 model 1 was the first part to support this
		 */
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));

	/* brand-id */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * brand id first appeared on Pentium III Xeon model 8,
		 * and Celeron model 8 processors and Opteron
		 */
		create = cpi->cpi_family > 6 ||
		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create && cpi->cpi_brandid != 0) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "brand-id", cpi->cpi_brandid);
	}

	/* chunks, and apic-id */
	switch (cpi->cpi_vendor) {
		/*
		 * first available on Pentium IV and Opteron (K8)
		 */
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "chunks", CPI_CHUNKS(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "apic-id", cpi->cpi_apicid);
		if (cpi->cpi_chipid >= 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "chip#", cpi->cpi_chipid);
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clog#", cpi->cpi_clogid);
		}
	}

	/* cpuid-features */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpuid-features", CPI_FEATURES_EDX(cpi));

	/* cpuid-features-ecx */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));

	/* ext-cpuid-features */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
	case X86_VENDOR_Cyrix:
	case X86_VENDOR_TM:
	case X86_VENDOR_Centaur:
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
	}

	/*
	 * Brand String first appeared in Intel Pentium IV, AMD K5
	 * model 1, and Cyrix GXm.  On earlier models we try and
	 * simulate something similar .. so this string should always
	 * say -something- about the processor, however lame.
	 */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "brand-string", cpi->cpi_brandstr);

	/*
	 * Finally, cache and tlb information
	 */
	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_AMD:
		amd_cache_info(cpi, cpu_devi);
		break;
	default:
		break;
	}
}
/*
 * A cacheinfo walker that fetches the size, line-size and associativity
 * of the L2 cache
 */
static int
intel_l2cinfo(void *arg, const struct cachetab *ct)
{
	struct l2info *l2i = arg;
	int *ip;

	if (ct->ct_label != l2_cache_str &&
	    ct->ct_label != sl2_cache_str)
		return (0);	/* not an L2 -- keep walking */

	if ((ip = l2i->l2i_csz) != NULL)
		*ip = ct->ct_size;
	if ((ip = l2i->l2i_lsz) != NULL)
		*ip = ct->ct_line_size;
	if ((ip = l2i->l2i_assoc) != NULL)
		*ip = ct->ct_assoc;
	l2i->l2i_ret = ct->ct_size;
	return (1);		/* was an L2 -- terminate walk */
}
/*
 * AMD L2/L3 Cache and TLB Associativity Field Definition:
 *
 * Unlike the associativity for the L1 cache and tlb where the 8 bit
 * value is the associativity, the associativity for the L2 cache and
 * tlb is encoded in the following table. The 4 bit L2 value serves as
 * an index into the amd_afd[] array to determine the associativity.
 * -1 is undefined. 0 is fully associative.
 */

static int amd_afd[] =
	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
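/*
 * Worked example of the table above (for illustration only): a raw 4-bit
 * associativity field of 0x6 decodes to amd_afd[6] == 8 (8-way), and 0xf
 * decodes to amd_afd[15] == 0 (fully associative), e.g.
 *
 *	assoc = amd_afd[BITX(cp->cp_ecx, 15, 12)];
 *
 * which is how amd_l2cacheinfo() below consumes the encoding.
 */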
static void
amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
{
	struct cpuid_regs *cp;
	uint_t size, assoc;
	int i;
	int *ip;

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
		uint_t cachesz = size * 1024;
		assoc = amd_afd[i];

		ASSERT(assoc != -1);

		if ((ip = l2i->l2i_csz) != NULL)
			*ip = cachesz;
		if ((ip = l2i->l2i_lsz) != NULL)
			*ip = BITX(cp->cp_ecx, 7, 0);
		if ((ip = l2i->l2i_assoc) != NULL)
			*ip = assoc;
		l2i->l2i_ret = cachesz;
	}
}
int
getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct l2info __l2info, *l2i = &__l2info;

	l2i->l2i_csz = csz;
	l2i->l2i_lsz = lsz;
	l2i->l2i_assoc = assoc;
	l2i->l2i_ret = -1;

	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_AMD:
		amd_l2cacheinfo(cpi, l2i);
		break;
	default:
		break;
	}
	return (l2i->l2i_ret);
}
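/*
 * A minimal usage sketch for getl2cacheinfo() (illustrative; callers may
 * pass NULL for any value they do not need):
 *
 *	int csz, lsz, assoc;
 *
 *	if (getl2cacheinfo(CPU, &csz, &lsz, &assoc) > 0) {
 *		... csz/lsz/assoc now hold the reported L2 geometry ...
 *	}
 *
 * A return value of -1 means no L2 cache information was found.
 */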
uint32_t *
cpuid_mwait_alloc(cpu_t *cpu)
{
	uint32_t *ret;
	size_t mwait_size;

	ASSERT(cpuid_checkpass(CPU, 2));

	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
	if (mwait_size == 0)
		return (NULL);

	/*
	 * kmem_alloc() returns cache line size aligned data for mwait_size
	 * allocations.  mwait_size is currently cache line sized.  Neither
	 * of these implementation details is guaranteed to be true in the
	 * future.
	 *
	 * First try allocating mwait_size as kmem_alloc() currently returns
	 * correctly aligned memory.  If kmem_alloc() does not return
	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
	 *
	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
	 * decide to free this memory.
	 */
	ret = kmem_zalloc(mwait_size, KM_SLEEP);
	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
		*ret = MWAIT_RUNNING;
		return (ret);
	} else {
		kmem_free(ret, mwait_size);
		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
		*ret = MWAIT_RUNNING;
		return (ret);
	}
}
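/*
 * Alignment sketch for the fallback path above, assuming a power-of-two
 * mwait_size of 64 bytes: if the first kmem_zalloc(64, ...) happened to
 * return, say, 0x1030, then P2ROUNDUP(0x1030, 64) == 0x1040 != 0x1030, so
 * we instead allocate 128 bytes and round the new buffer up; the rounded
 * pointer is 64-byte aligned and its 64 bytes still fall entirely inside
 * the 128-byte allocation.
 */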
void
cpuid_mwait_free(cpu_t *cpu)
{
	if (cpu->cpu_m.mcpu_cpi == NULL) {
		return;
	}

	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
}
void
patch_tsc_read(int flag)
{
	size_t cnt;

	switch (flag) {
	case TSC_NONE:
		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
		break;
	case TSC_RDTSC_MFENCE:
		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_mfence_start, cnt);
		break;
	case TSC_RDTSC_LFENCE:
		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_lfence_start, cnt);
		break;
	case TSC_TSCP:
		cnt = &_tscp_end - &_tscp_start;
		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
		break;
	default:
		/* Bail for unexpected TSC types. (TSC_NONE covers 0) */
		cmn_err(CE_PANIC, "Unrecognized TSC type: %d", flag);
		break;
	}
}
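/*
 * For illustration, assuming the boot code selects the lfence variant:
 *
 *	patch_tsc_read(TSC_RDTSC_LFENCE);
 *
 * copies the bytes between _tsc_lfence_start and _tsc_lfence_end over
 * tsc_read(), so later callers of tsc_read() run the fenced rdtsc sequence
 * directly, with no per-call flag test or indirection.
 */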
int
cpuid_deep_cstates_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));

	cpi = CPU->cpu_m.mcpu_cpi;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
		return (0);

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_xmaxeax < 0x80000007)
			return (0);

		/*
		 * Does the TSC run at a constant rate in all ACPI C-states?
		 */
		regs.cp_eax = 0x80000007;
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);

	default:
		return (0);
	}
}
void
post_startup_cpu_fixups(void)
{
	/*
	 * Some AMD processors support C1E state. Entering this state will
	 * cause the local APIC timer to stop, which we can't deal with at
	 * this time.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
}
void
enable_pcid(void)
{
	if (x86_use_pcid == -1)
		x86_use_pcid = is_x86_feature(x86_featureset, X86FSET_PCID);

	if (x86_use_invpcid == -1) {
		x86_use_invpcid = is_x86_feature(x86_featureset,
		    X86FSET_INVPCID);
	}

	if (!x86_use_pcid)
		return;

	/*
	 * Intel says that on setting PCIDE, it immediately starts using the
	 * PCID bits; better make sure there's nothing there.
	 */
	ASSERT((getcr3() & MMU_PAGEOFFSET) == PCID_NONE);

	setcr4(getcr4() | CR4_PCIDE);
}
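/*
 * Illustrative note on the ASSERT above: once CR4.PCIDE is set, the low
 * 12 bits of %cr3 are interpreted as the current PCID, so
 *
 *	(getcr3() & MMU_PAGEOFFSET) == PCID_NONE
 *
 * verifies that nothing is already sitting in those bits before the
 * enable bit is flipped.
 */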
/*
 * Setup necessary registers to enable XSAVE feature on this processor.
 * This function needs to be called early enough, so that no xsave/xrstor
 * ops will execute on the processor before the MSRs are properly set up.
 *
 * Current implementation has the following assumptions:
 * - cpuid_pass1() is done, so that X86 features are known.
 * - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);
	/*
	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
	 * the OSXSAVE feature.
	 */
	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;

	setup_xfem();
}
/*
 * Starting with the Westmere processor the local
 * APIC timer will continue running in all C-states,
 * including the deepest C-states.
 */
int
cpuid_arat_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	cpi = CPU->cpu_m.mcpu_cpi;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * Always-running Local APIC Timer is
		 * indicated by CPUID.6.EAX[2].
		 */
		if (cpi->cpi_maxeax >= 6) {
			regs.cp_eax = 6;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_eax & CPUID_CSTATE_ARAT);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}
/*
 * Check support for Intel ENERGY_PERF_BIAS feature
 */
int
cpuid_iepb_supported(struct cpu *cp)
{
	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(cp, 1));

	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
		return (0);
	}

	/*
	 * Intel ENERGY_PERF_BIAS MSR is indicated by
	 * capability bit CPUID.6.ECX.3
	 */
	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
		return (0);

	regs.cp_eax = 0x6;
	(void) cpuid_insn(NULL, &regs);
	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
}
/*
 * Check support for TSC deadline timer
 *
 * TSC deadline timer provides a superior software programming
 * model over local APIC timer that eliminates "time drifts".
 * Instead of specifying a relative time, software specifies an
 * absolute time as the target at which the processor should
 * generate a timer event.
 */
int
cpuid_deadline_tsc_supported(void)
{
	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 1) {
			regs.cp_eax = 1;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}
#if defined(__amd64) && !defined(__xpv)
/*
 * Patch in versions of bcopy for high performance Intel Nhm processors
 * and later.
 */
void
patch_memops(uint_t vendor)
{
	size_t cnt, i;
	caddr_t to, from;

	if ((vendor == X86_VENDOR_Intel) &&
	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
		cnt = &bcopy_patch_end - &bcopy_patch_start;
		to = &bcopy_ck_size;
		from = &bcopy_patch_start;
		for (i = 0; i < cnt; i++) {
			*to++ = *from++;
		}
	}
}
#endif	/* __amd64 && !__xpv */
/*
 * This function finds the number of bits to represent the number of cores per
 * chip and the number of strands per core for the Intel platforms.
 * It reuses the x2APIC cpuid code of cpuid_pass2().
 */
void
cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
{
	struct cpuid_regs regs;
	struct cpuid_regs *cp = &regs;

	if (vendor != X86_VENDOR_Intel) {
		return;
	}

	/* if the cpuid level is 0xB, extended topo is available. */
	cp->cp_eax = 0;
	if (__cpuid_insn(cp) >= 0xB) {

		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
		(void) __cpuid_insn(cp);

		/*
		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint_t coreid_shift = 0;
			uint_t chipid_shift = 0;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					/*
					 * Thread level processor topology
					 * Number of bits shift right APIC ID
					 * to get the coreid.
					 */
					coreid_shift = BITX(cp->cp_eax, 4, 0);
				} else if (level == 2) {
					/*
					 * Core level processor topology
					 * Number of bits shift right APIC ID
					 * to get the chipid.
					 */
					chipid_shift = BITX(cp->cp_eax, 4, 0);
				}
			}

			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
				*strand_nbits = coreid_shift;
				*core_nbits = chipid_shift - coreid_shift;