/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Portions Copyright 2009 Advanced Micro Devices, Inc.
 */
/*
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 */
/*
 * Various routines to handle identification
 * and classification of x86 processors.
 */
#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpuvar.h>
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <sys/controlregs.h>
#include <sys/bitmap.h>
#include <sys/auxv_386.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/hypervisor.h>
#include <sys/ontrap.h>
/*
 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
 * them accordingly. For most modern processors, feature detection occurs here
 * in pass 1.
 *
 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
 * for the boot CPU and does the basic analysis that the early kernel needs.
 * x86_featureset is set based on the return value of cpuid_pass1() of the boot
 * CPU.
 *
 * Pass 1 includes:
 *
 *	o Determining vendor/model/family/stepping and setting x86_type and
 *	  x86_vendor accordingly.
 *	o Processing the feature flags returned by the cpuid instruction while
 *	  applying any workarounds or tricks for the specific processor.
 *	o Mapping the feature flags into Solaris feature bits (X86_*).
 *	o Processing extended feature flags if supported by the processor,
 *	  again while applying specific processor knowledge.
 *	o Determining the CMT characteristics of the system.
 *
 * Pass 1 is done on non-boot CPUs during their initialization and the results
 * are used only as a meager attempt at ensuring that all processors within the
 * system support the same features.
 *
 * Pass 2 of cpuid feature analysis happens just at the beginning
 * of startup().  It just copies in and corrects the remainder
 * of the cpuid data we depend on: standard cpuid functions that we didn't
 * need for pass1 feature analysis, and extended cpuid functions beyond the
 * simple feature processing done in pass1.
 *
 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
 * particular kernel memory allocation has been made available. It creates a
 * readable brand string based on the data collected in the first two passes.
 *
 * Pass 4 of cpuid analysis is invoked after post_startup() when all
 * the support infrastructure for various hardware features has been
 * initialized. It determines which processor features will be reported
 * to userland via the aux vector.
 *
 * All passes are executed on all CPUs, but only the boot CPU determines what
 * features the kernel will use.
 *
 * Much of the worst junk in this file is for the support of processors
 * that didn't really implement the cpuid instruction properly.
 *
 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 * the pass numbers.  Accordingly, changes to the pass code may require changes
 * to the accessor code.
 */
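/*
 * As a rough sketch of the sequencing described above (hedged: call sites
 * are approximate and the pass 3/4 signatures are assumed here, not taken
 * from this excerpt), the boot CPU sees roughly:
 *
 *	cpuid_pass1(cpu, x86_featureset);	mlsetup(), very early boot
 *	cpuid_pass2(cpu);			startup()
 *	cpuid_pass3(cpu);			once kmem is available
 *	(void) cpuid_pass4(cpu);		post_startup(), aux vector
 */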
uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;

uint_t pentiumpro_bug4046376;

uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];

static char *x86_feature_names[NUM_X86_FEATURES] = {
boolean_t
is_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	return (BT_TEST((ulong_t *)featureset, feature));
}

void
add_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_SET((ulong_t *)featureset, feature);
}

void
remove_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_CLEAR((ulong_t *)featureset, feature);
}

boolean_t
compare_x86_featureset(void *setA, void *setB)
{
	/*
	 * We assume that the unused bits of the bitmap are always zero.
	 */
	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

void
print_x86_featureset(void *featureset)
{
	uint_t i;

	for (i = 0; i < NUM_X86_FEATURES; i++) {
		if (is_x86_feature(featureset, i)) {
			cmn_err(CE_CONT, "?x86_feature: %s\n",
			    x86_feature_names[i]);
		}
	}
}
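/*
 * Illustrative use of the accessors above (not from the original source;
 * the feature bit chosen is an arbitrary example):
 *
 *	uchar_t fs[BT_SIZEOFMAP(NUM_X86_FEATURES)] = { 0 };
 *
 *	add_x86_feature(fs, X86FSET_SSE2);
 *	ASSERT(is_x86_feature(fs, X86FSET_SSE2));
 *	remove_x86_feature(fs, X86FSET_SSE2);
 *	ASSERT(compare_x86_featureset(fs, fs));
 */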
static size_t xsave_state_size = 0;
uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
boolean_t xsave_force_disable = B_FALSE;

/*
 * This is set to the platform type we are running on.
 */
static int platform_type = -1;

/*
 * Variable to patch if hypervisor platform detection needs to be
 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
 */
int enable_platform_detection = 1;
/*
 * monitor/mwait info.
 *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment.  buf_actual and size_actual should be passed
 * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
 */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};
/*
 * xsave/xrestor info.
 *
 * This structure contains HW feature bits and the size of the xsave save
 * area.  Note: the kernel will use the maximum size required for all
 * hardware features.  It is not optimized for potential memory savings if
 * features at the end of the save area are not enabled.
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;	/* Supported HW features */
	uint32_t	xsav_hw_features_high;	/* Supported HW features */
	size_t		xsav_max_size;	/* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
};
/*
 * These constants determine how many of the elements of the
 * cpuid we cache in the cpuid_info data structure; the
 * remaining elements are accessible via the cpuid instruction.
 */

#define	NMAX_CPI_STD	8		/* eax = 0 .. 7 */
#define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
/*
 * Some terminology needs to be explained:
 *  - Socket: Something that can be plugged into a motherboard.
 *  - Package: Same as socket.
 *  - Chip: Same as socket.  Note that AMD's documentation uses the term
 *    "chip" differently: there, chip is the same as processor node (below).
 *  - Processor node: Some AMD processors have more than one
 *    "subprocessor" embedded in a package.  These subprocessors (nodes)
 *    are fully-functional processors themselves with cores, caches,
 *    memory controllers, PCI configuration spaces.  They are connected
 *    inside the package with Hypertransport links.  On single-node
 *    processors, processor node is equivalent to chip/socket/package.
 *  - Compute Unit: Some AMD processors pair cores in "compute units" that
 *    share the FPU and the I$ and L2 caches.
 */
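/*
 * To make the terms concrete with a hypothetical example: a two-node AMD
 * family 0x15 package with 8 cores total is one socket/package/chip, two
 * processor nodes of 4 cores each, and (at 2 cores per compute unit) four
 * compute units in all.
 */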
struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
					/*		AMD: package/socket # */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 7 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
	uint8_t cpi_vabits;		/* fn 0x80000006: %eax */
	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */

	id_t cpi_coreid;		/* same coreid => strands share core */
	int cpi_pkgcoreid;		/* core number within single package */
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[6];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3
#define	AMD_ECX_FEATURES	4
#define	STD_EBX_FEATURES	5
	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */

	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
	uint32_t cpi_apicid;
	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */

	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
};

static struct cpuid_info cpuid_info0;
/*
 * These bit fields are defined by the Intel Application Note AP-485
 * "Intel Processor Identification and the CPUID Instruction"
 */
#define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
#define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
#define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
#define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
#define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
#define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
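/*
 * Worked example (hypothetical %eax value 0x000306c3): CPI_FAMILY() is 6,
 * CPI_MODEL() is 0xc, CPI_MODEL_XTD() is 3 and CPI_STEP() is 3; since the
 * base family is 6, cpuid_pass1() below folds these into
 * cpi_model = 0xc + (3 << 4) = 0x3c.
 */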
#define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
#define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
#define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
#define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
#define	CPI_FEATURES_7_0_EBX(cpi)	((cpi)->cpi_std[7].cp_ebx)

#define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
#define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
#define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
#define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)

#define	CPI_MAXEAX_MAX		0x100		/* sanity control */
#define	CPI_XMAXEAX_MAX		0x80000100
#define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
#define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */

/*
 * Function 4 (Deterministic Cache Parameters) macros
 * Defined by Intel Application Note AP-485
 */
#define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
#define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
#define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
#define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
#define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
#define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
#define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)

#define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
#define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
#define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)

#define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)

#define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
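/*
 * For reference, a cache's size in bytes follows from the function 4
 * fields above, each raw field being one less than the true value:
 *
 *	size = (CPI_CACHE_WAYS(regs) + 1) *
 *	    (CPI_CACHE_PARTS(regs) + 1) *
 *	    (CPI_CACHE_COH_LN_SZ(regs) + 1) *
 *	    (CPI_CACHE_SETS(regs) + 1);
 */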
/*
 * A couple of shorthand macros to identify "later" P6-family chips
 * like the Pentium M and Core.  First, the "older" P6-based stuff
 * (loosely defined as "pre-Pentium-4"):
 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
 */
#define	IS_LEGACY_P6(cpi) (			\
	cpi->cpi_family == 6 &&			\
		(cpi->cpi_model == 1 ||		\
		cpi->cpi_model == 3 ||		\
		cpi->cpi_model == 5 ||		\
		cpi->cpi_model == 6 ||		\
		cpi->cpi_model == 7 ||		\
		cpi->cpi_model == 8 ||		\
		cpi->cpi_model == 0xA ||	\
		cpi->cpi_model == 0xB)		\
)

/* A "new F6" is everything with family 6 that's not the above */
#define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))

/* Extended family/model support */
#define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
	cpi->cpi_family >= 0xf)
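/*
 * For example, the Pentium M (family 6, model 0xD) is not in the legacy
 * model list, so IS_LEGACY_P6() is false and IS_NEW_F6() is true for it.
 */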
/*
 * Info for monitor/mwait idle loop.
 *
 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
 * 2006.
 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
 * Documentation Updates" #33633, Rev 2.05, December 2006.
 */
#define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
#define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */

#define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
#define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
#define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
#define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
#define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)

/*
 * Number of sub-cstates for a given c-state.
 */
#define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
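/*
 * For example, MWAIT_NUM_SUBC_STATES(cpi, 4) extracts bits 7:4 of the
 * function 5 %edx value - the number of C1 sub-states - since each
 * C-state's count occupies a 4-bit field and callers pass the low bit
 * index of the field of interest.
 */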
/*
 * XSAVE leaf 0xD enumeration
 */
#define	CPUID_LEAFD_2_YMM_OFFSET	576
#define	CPUID_LEAFD_2_YMM_SIZE		256
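/*
 * These values follow from the architectural XSAVE layout: the legacy
 * FXSAVE region (512 bytes) plus the XSAVE header (64 bytes) precede the
 * AVX state, putting the ymm save area at offset 576, and 16 %ymm
 * registers with 16 bytes of high state each occupy 256 bytes.
 */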
/*
 * Functions we consume from cpuid_subr.c; don't publish these in a header
 * file to try and keep people using the expected cpuid_* interfaces.
 */
extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
#if defined(__xpv)
/*
 * Apply various platform-dependent restrictions where the
 * underlying platform restrictions mean the CPU can be marked
 * as less capable than its cpuid instruction would imply.
 */
static void
platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
{
	switch (eax) {
	case 1: {
		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
		    0 : CPUID_INTC_EDX_MCA;
		cp->cp_edx &=
		    ~(mcamask |
		    CPUID_INTC_EDX_PSE |
		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
		break;
	}

	case 0x80000001:
		cp->cp_edx &=
		    ~(CPUID_AMD_EDX_PSE |
		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP);
		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
		break;
	default:
		break;
	}

	switch (vendor) {
	case X86_VENDOR_Intel:
		switch (eax) {
		case 4:
			/*
			 * Zero out the (ncores-per-chip - 1) field
			 */
			cp->cp_eax &= 0x03fffffff;
			break;
		default:
			break;
		}
		break;
	case X86_VENDOR_AMD:
		switch (eax) {

		case 0x80000001:
			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
			break;

		case 0x80000008:
			/*
			 * Zero out the (ncores-per-chip - 1) field
			 */
			cp->cp_ecx &= 0xffffff00;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
#else
#define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
#endif	/* __xpv */
/*
 * Some undocumented ways of patching the results of the cpuid
 * instruction to permit running Solaris 10 on future cpus that
 * we don't currently support.  Could be set to non-zero values
 * via settings in eeprom.
 */

uint32_t cpuid_feature_ecx_include;
uint32_t cpuid_feature_ecx_exclude;
uint32_t cpuid_feature_edx_include;
uint32_t cpuid_feature_edx_exclude;
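/*
 * A hypothetical use of the overrides above (illustrative only, not from
 * the original source): masking a misreported MONITOR/MWAIT capability
 * (%ecx bit 3, value 0x8) with an /etc/system entry such as:
 *
 *	set cpuid_feature_ecx_exclude = 0x8
 */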
/*
 * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
 */
void
cpuid_alloc_space(cpu_t *cpu)
{
	/*
	 * By convention, cpu0 is the boot cpu, which is set up
	 * before memory allocation is available.  All other cpus get
	 * their cpuid_info struct allocated here.
	 */
	ASSERT(cpu->cpu_id != 0);
	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
	cpu->cpu_m.mcpu_cpi =
	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
}
void
cpuid_free_space(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	int i;

	ASSERT(cpi != NULL);
	ASSERT(cpi != &cpuid_info0);

	/*
	 * Free up any function 4 related dynamic storage
	 */
	for (i = 1; i < cpi->cpi_std_4_size; i++)
		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
	if (cpi->cpi_std_4_size > 0)
		kmem_free(cpi->cpi_std_4,
		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));

	kmem_free(cpi, sizeof (*cpi));
	cpu->cpu_m.mcpu_cpi = NULL;
}
/*
 * Determine the type of the underlying platform. This is used to customize
 * initialization of various subsystems (e.g. TSC). determine_platform() must
 * only ever be called once to prevent two processors from seeing different
 * values of platform_type. Must be called before cpuid_pass1(), the earliest
 * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
 */
void
determine_platform(void)
{
	struct cpuid_regs cp;
	uint32_t base;
	uint32_t regs[4];
	char *hvstr = (char *)regs;

	ASSERT(platform_type == -1);

	platform_type = HW_NATIVE;

	if (!enable_platform_detection)
		return;

	/*
	 * If Hypervisor CPUID bit is set, try to determine hypervisor
	 * vendor signature, and set platform type accordingly.
	 *
	 * References:
	 * http://lkml.org/lkml/2008/10/1/246
	 * http://kb.vmware.com/kb/1009458
	 */
	cp.cp_eax = 0x1;
	(void) __cpuid_insn(&cp);
	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
		cp.cp_eax = 0x40000000;
		(void) __cpuid_insn(&cp);
		regs[0] = cp.cp_ebx;
		regs[1] = cp.cp_ecx;
		regs[2] = cp.cp_edx;
		regs[3] = 0;
		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
			platform_type = HW_XEN_HVM;
			return;
		}
		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
			platform_type = HW_VMWARE;
			return;
		}
		if (strcmp(hvstr, HVSIG_KVM) == 0) {
			platform_type = HW_KVM;
			return;
		}
		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
			platform_type = HW_MICROSOFT;
	} else {
		/*
		 * Check older VMware hardware versions. VMware hypervisor is
		 * detected by performing an IN operation to VMware hypervisor
		 * port and checking that value returned in %ebx is VMware
		 * hypervisor magic value.
		 *
		 * References: http://kb.vmware.com/kb/1009458
		 */
		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
		if (regs[1] == VMWARE_HVMAGIC) {
			platform_type = HW_VMWARE;
			return;
		}
	}

	/*
	 * Check Xen hypervisor. In a fully virtualized domain,
	 * Xen's pseudo-cpuid function returns a string representing the
	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
	 * supported cpuid function. We need at least a (base + 2) leaf value
	 * to do what we want to do. Try different base values, since the
	 * hypervisor might use a different one depending on whether Hyper-V
	 * emulation is switched on by default or not.
	 */
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cp.cp_eax = base;
		(void) __cpuid_insn(&cp);
		regs[0] = cp.cp_ebx;
		regs[1] = cp.cp_ecx;
		regs[2] = cp.cp_edx;
		regs[3] = 0;
		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
		    cp.cp_eax >= (base + 2)) {
			platform_type &= ~HW_NATIVE;
			platform_type |= HW_XEN_HVM;
			return;
		}
	}
}

int
get_hwenv(void)
{
	ASSERT(platform_type != -1);
	return (platform_type);
}

int
is_controldom(void)
{
	return (DOMAIN_IS_INITDOMAIN(xen_info));
}
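/*
 * For reference, the 12-byte hypervisor signature checked above is
 * returned packed into %ebx:%ecx:%edx of leaf 0x40000000; KVM, for
 * example, reports "KVMKVMKVM\0\0\0", which is what hvstr is compared
 * against via HVSIG_KVM.
 */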
static void
cpuid_intel_getids(cpu_t *cpu, void *feature)
{
	uint_t i;
	uint_t chipid_shift = 0;
	uint_t coreid_shift = 0;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
		chipid_shift++;

	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);

	if (is_x86_feature(feature, X86FSET_CMP)) {
		/*
		 * Multi-core (and possibly multi-threaded)
		 * processors.
		 */
		uint_t ncpu_per_core;
		if (cpi->cpi_ncore_per_chip == 1)
			ncpu_per_core = cpi->cpi_ncpu_per_chip;
		else if (cpi->cpi_ncore_per_chip > 1)
			ncpu_per_core = cpi->cpi_ncpu_per_chip /
			    cpi->cpi_ncore_per_chip;
		/*
		 * 8bit APIC IDs on dual core Pentiums
		 * look like this:
		 *
		 * +-----------------------+------+------+
		 * | Physical Package ID   |  MC  |  HT  |
		 * +-----------------------+------+------+
		 * <------- chipid -------->
		 * <------- coreid --------------->
		 *
		 * Where the number of bits necessary to
		 * represent MC and HT fields together equals
		 * the minimum number of bits necessary to
		 * store the value of cpi->cpi_ncpu_per_chip.
		 * Of those bits, the MC part uses the number
		 * of bits necessary to store the value of
		 * cpi->cpi_ncore_per_chip.
		 */
		for (i = 1; i < ncpu_per_core; i <<= 1)
			coreid_shift++;
		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
	} else if (is_x86_feature(feature, X86FSET_HTT)) {
		/*
		 * Single-core multi-threaded processors.
		 */
		cpi->cpi_coreid = cpi->cpi_chipid;
		cpi->cpi_pkgcoreid = 0;
	}
	cpi->cpi_procnodeid = cpi->cpi_chipid;
	cpi->cpi_compunitid = cpi->cpi_coreid;
}
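/*
 * Worked example for the shifts above (hypothetical part): a package with
 * 2 cores of 2 threads each has cpi_ncpu_per_chip == 4, so chipid_shift
 * becomes 2 and coreid_shift becomes 1.  An APIC ID of 5 (0b101) then
 * yields cpi_chipid == 1, cpi_clogid == 1, cpi_coreid == 2 and
 * cpi_pkgcoreid == 0.
 */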
static void
cpuid_amd_getids(cpu_t *cpu)
{
	int i, first_half, coreidsz;
	uint32_t nb_caps_reg;
	uint_t node2_1;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct cpuid_regs *cp;

	/*
	 * AMD CMP chips currently have a single thread per core.
	 *
	 * Since no two cpus share a core we must assign a distinct coreid
	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
	 * however, guarantee that sibling cores of a chip will have sequential
	 * coreids starting at a multiple of the number of cores per chip -
	 * that is usually the case, but if the ACPI MADT table is presented
	 * in a different order then we need to perform a few more gymnastics
	 * for the pkgcoreid.
	 *
	 * All processors in the system have the same number of enabled
	 * cores.  Cores within a processor are always numbered sequentially
	 * from 0 regardless of how many or which are disabled, and there
	 * is no way for the operating system to discover the real core id
	 * when some are disabled.
	 *
	 * In family 0x15, the cores come in pairs called compute units.  They
	 * share I$ and L2 caches and the FPU.  Enumeration of this feature is
	 * simplified by the new topology extensions CPUID leaf, indicated by
	 * the X86 feature X86FSET_TOPOEXT.
	 */

	cpi->cpi_coreid = cpu->cpu_id;
	cpi->cpi_compunitid = cpu->cpu_id;

	if (cpi->cpi_xmaxeax >= 0x80000008) {

		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);

		/*
		 * In AMD parlance chip is really a node while Solaris
		 * sees chip as equivalent to socket/package.
		 */
		cpi->cpi_ncore_per_chip =
		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
		if (coreidsz == 0) {
			/* Use legacy method */
			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
				coreidsz++;
			if (coreidsz == 0)
				coreidsz = 1;
		}
	} else {
		/* Assume single-core part */
		cpi->cpi_ncore_per_chip = 1;
		coreidsz = 1;
	}

	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;

	/* Get node ID, compute unit ID */
	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
	    cpi->cpi_xmaxeax >= 0x8000001e) {
		cp = &cpi->cpi_extd[0x1e];
		cp->cp_eax = 0x8000001e;
		(void) __cpuid_insn(cp);

		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
	} else if (cpi->cpi_family == 0x10) {
		/*
		 * See if we are a multi-node processor.
		 * All processors in the system have the same number of nodes
		 */
		nb_caps_reg = pci_getl_func(0, 24, 3, 0xe8);
		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
			/* Single-node */
			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
			    coreidsz);
		} else {

			/*
			 * Multi-node revision D (2 nodes per package
			 * are supported)
			 */
			cpi->cpi_procnodes_per_pkg = 2;

			first_half = (cpi->cpi_pkgcoreid <=
			    (cpi->cpi_ncore_per_chip/2 - 1));

			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
				/* We are BSP */
				cpi->cpi_procnodeid = (first_half ? 0 : 1);
			} else {

				/* We are AP */
				/* NodeId[2:1] bits to use for reading F3xe8 */
				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;

				nb_caps_reg =
				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);

				/*
				 * Check IntNodeNum bit (31:30, but bit 31 is
				 * always 0 on dual-node processors)
				 */
				if (BITX(nb_caps_reg, 30, 30) == 0)
					cpi->cpi_procnodeid = node2_1 +
					    !first_half;
				else
					cpi->cpi_procnodeid = node2_1 +
					    first_half;
			}
		}
	} else {
		cpi->cpi_procnodeid = 0;
	}

	cpi->cpi_chipid =
	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
}
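/*
 * Worked example for the TOPOEXT arithmetic above (values hypothetical):
 * with cpi_ncore_per_chip == 8, cpi_cores_per_compunit == 2,
 * cpi_procnodes_per_pkg == 2 and cpi_procnodeid == 2, a raw compute-unit
 * id of 3 becomes cpi_compunitid = 3 + (8 / 2) * (2 / 2) = 7.
 */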
/*
 * Setup XFeature_Enabled_Mask register. Required by xsave feature.
 */
void
setup_xfem(void)
{
	uint64_t flags = XFEATURE_LEGACY_FP;

	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	if (is_x86_feature(x86_featureset, X86FSET_SSE))
		flags |= XFEATURE_SSE;

	if (is_x86_feature(x86_featureset, X86FSET_AVX))
		flags |= XFEATURE_AVX;

	set_xcr(XFEATURE_ENABLED_MASK, flags);

	xsave_bv_all = flags;
}
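/*
 * The XFEATURE_* flags assembled above follow the architectural XCR0
 * layout: bit 0 is legacy x87, bit 1 is SSE (%xmm) and bit 2 is AVX
 * (%ymm), so an AVX-capable cpu ends up writing 0x7 to the register.
 */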
void
cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
{
	uint32_t mask_ecx, mask_edx;
	struct cpuid_info *cpi;
	struct cpuid_regs *cp;
	int xcpuid;
#if !defined(__xpv)
	extern int idle_cpu_prefer_mwait;
#endif

	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cpu->cpu_id == 0) {
		if (cpu->cpu_m.mcpu_cpi == NULL)
			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
	}

	add_x86_feature(featureset, X86FSET_CPUID);

	cpi = cpu->cpu_m.mcpu_cpi;
	ASSERT(cpi != NULL);
	cp = &cpi->cpi_std[0];
	cp->cp_eax = 0;
	cpi->cpi_maxeax = __cpuid_insn(cp);
	{
		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
		*iptr++ = cp->cp_ebx;
		*iptr++ = cp->cp_edx;
		*iptr++ = cp->cp_ecx;
		*(char *)&cpi->cpi_vendorstr[12] = '\0';
	}

	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
	x86_vendor = cpi->cpi_vendor;		/* for compatibility */

	/*
	 * Limit the range in case of weird hardware
	 */
	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
	if (cpi->cpi_maxeax < 1)
		goto pass1_done;

	cp = &cpi->cpi_std[1];
	cp->cp_eax = 1;
	(void) __cpuid_insn(cp);

	/*
	 * Extract identifying constants for easy access.
	 */
	cpi->cpi_model = CPI_MODEL(cpi);
	cpi->cpi_family = CPI_FAMILY(cpi);

	if (cpi->cpi_family == 0xf)
		cpi->cpi_family += CPI_FAMILY_XTD(cpi);

	/*
	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
	 * Intel, and presumably everyone else, uses model == 0xf, as
	 * one would expect (max value means possible overflow).  Sigh.
	 */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (IS_EXTENDED_MODEL_INTEL(cpi))
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	case X86_VENDOR_AMD:
		if (CPI_FAMILY(cpi) == 0xf)
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	default:
		if (cpi->cpi_model == 0xf)
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	}

	cpi->cpi_step = CPI_STEP(cpi);
	cpi->cpi_brandid = CPI_BRANDID(cpi);
1025 * - believe %edx feature word
1026 * - ignore %ecx feature word
1027 * - 32-bit virtual and physical addressing
1029 mask_edx
= 0xffffffff;
1032 cpi
->cpi_pabits
= cpi
->cpi_vabits
= 32;
1034 switch (cpi
->cpi_vendor
) {
1035 case X86_VENDOR_Intel
:
1036 if (cpi
->cpi_family
== 5)
1037 x86_type
= X86_TYPE_P5
;
1038 else if (IS_LEGACY_P6(cpi
)) {
1039 x86_type
= X86_TYPE_P6
;
1040 pentiumpro_bug4046376
= 1;
1042 * Clear the SEP bit when it was set erroneously
1044 if (cpi
->cpi_model
< 3 && cpi
->cpi_step
< 3)
1045 cp
->cp_edx
&= ~CPUID_INTC_EDX_SEP
;
1046 } else if (IS_NEW_F6(cpi
) || cpi
->cpi_family
== 0xf) {
1047 x86_type
= X86_TYPE_P4
;
1049 * We don't currently depend on any of the %ecx
1050 * features until Prescott, so we'll only check
1051 * this from P4 onwards. We might want to revisit
1054 mask_ecx
= 0xffffffff;
1055 } else if (cpi
->cpi_family
> 0xf)
1056 mask_ecx
= 0xffffffff;
1058 * We don't support MONITOR/MWAIT if leaf 5 is not available
1059 * to obtain the monitor linesize.
1061 if (cpi
->cpi_maxeax
< 5)
1062 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1064 case X86_VENDOR_IntelClone
:
1067 case X86_VENDOR_AMD
:
1068 #if defined(OPTERON_ERRATUM_108)
1069 if (cpi
->cpi_family
== 0xf && cpi
->cpi_model
== 0xe) {
1070 cp
->cp_eax
= (0xf0f & cp
->cp_eax
) | 0xc0;
1071 cpi
->cpi_model
= 0xc;
1074 if (cpi
->cpi_family
== 5) {
1078 * These CPUs have an incomplete implementation
1079 * of MCA/MCE which we mask away.
1081 mask_edx
&= ~(CPUID_INTC_EDX_MCE
| CPUID_INTC_EDX_MCA
);
1084 * Model 0 uses the wrong (APIC) bit
1085 * to indicate PGE. Fix it here.
1087 if (cpi
->cpi_model
== 0) {
1088 if (cp
->cp_edx
& 0x200) {
1089 cp
->cp_edx
&= ~0x200;
1090 cp
->cp_edx
|= CPUID_INTC_EDX_PGE
;
1095 * Early models had problems w/ MMX; disable.
1097 if (cpi
->cpi_model
< 6)
1098 mask_edx
&= ~CPUID_INTC_EDX_MMX
;
1102 * For newer families, SSE3 and CX16, at least, are valid;
1105 if (cpi
->cpi_family
>= 0xf)
1106 mask_ecx
= 0xffffffff;
1108 * We don't support MONITOR/MWAIT if leaf 5 is not available
1109 * to obtain the monitor linesize.
1111 if (cpi
->cpi_maxeax
< 5)
1112 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1116 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1117 * processors. AMD does not intend MWAIT to be used in the cpu
1118 * idle loop on current and future processors. 10h and future
1119 * AMD processors use more power in MWAIT than HLT.
1120 * Pre-family-10h Opterons do not have the MWAIT instruction.
1122 idle_cpu_prefer_mwait
= 0;
1128 * workaround the NT workaround in CMS 4.1
1130 if (cpi
->cpi_family
== 5 && cpi
->cpi_model
== 4 &&
1131 (cpi
->cpi_step
== 2 || cpi
->cpi_step
== 3))
1132 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1134 case X86_VENDOR_Centaur
:
1136 * workaround the NT workarounds again
1138 if (cpi
->cpi_family
== 6)
1139 cp
->cp_edx
|= CPUID_INTC_EDX_CX8
;
1141 case X86_VENDOR_Cyrix
:
1143 * We rely heavily on the probing in locore
1144 * to actually figure out what parts, if any,
1145 * of the Cyrix cpuid instruction to believe.
1148 case X86_TYPE_CYRIX_486
:
1151 case X86_TYPE_CYRIX_6x86
:
1154 case X86_TYPE_CYRIX_6x86L
:
1159 case X86_TYPE_CYRIX_6x86MX
:
1162 CPUID_INTC_EDX_MSR
|
1163 CPUID_INTC_EDX_CX8
|
1164 CPUID_INTC_EDX_PGE
|
1165 CPUID_INTC_EDX_CMOV
|
1168 case X86_TYPE_CYRIX_GXm
:
1170 CPUID_INTC_EDX_MSR
|
1171 CPUID_INTC_EDX_CX8
|
1172 CPUID_INTC_EDX_CMOV
|
1175 case X86_TYPE_CYRIX_MediaGX
:
1177 case X86_TYPE_CYRIX_MII
:
1178 case X86_TYPE_VIA_CYRIX_III
:
1181 CPUID_INTC_EDX_TSC
|
1182 CPUID_INTC_EDX_MSR
|
1183 CPUID_INTC_EDX_CX8
|
1184 CPUID_INTC_EDX_PGE
|
1185 CPUID_INTC_EDX_CMOV
|
1196 * Do not support MONITOR/MWAIT under a hypervisor
1198 mask_ecx
&= ~CPUID_INTC_ECX_MON
;
1200 * Do not support XSAVE under a hypervisor for now
1202 xsave_force_disable
= B_TRUE
;
1206 if (xsave_force_disable
) {
1207 mask_ecx
&= ~CPUID_INTC_ECX_XSAVE
;
1208 mask_ecx
&= ~CPUID_INTC_ECX_AVX
;
1209 mask_ecx
&= ~CPUID_INTC_ECX_F16C
;
1210 mask_ecx
&= ~CPUID_INTC_ECX_FMA
;
	/*
	 * Now we've figured out the masks that determine
	 * which bits we choose to believe, apply the masks
	 * to the feature words, then map the kernel's view
	 * of these feature words into its feature word.
	 */
	cp->cp_edx &= mask_edx;
	cp->cp_ecx &= mask_ecx;

	/*
	 * apply any platform restrictions (we don't call this
	 * immediately after __cpuid_insn here, because we need the
	 * workarounds applied above first)
	 */
	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);

	/*
	 * In addition to ecx and edx, Intel is storing a bunch of instruction
	 * set extensions in leaf 7's ebx.
	 */
	if (cpi->cpi_vendor == X86_VENDOR_Intel && cpi->cpi_maxeax >= 7) {
		struct cpuid_regs *ecp;
		ecp = &cpi->cpi_std[7];
		ecp->cp_eax = 7;
		ecp->cp_ecx = 0;
		(void) __cpuid_insn(ecp);
		/*
		 * If XSAVE has been disabled, just ignore all of the AVX
		 * dependent flags here.
		 */
		if (xsave_force_disable) {
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
		}
	}

	/*
	 * fold in overrides from the "eeprom" mechanism
	 */
	cp->cp_edx |= cpuid_feature_edx_include;
	cp->cp_edx &= ~cpuid_feature_edx_exclude;

	cp->cp_ecx |= cpuid_feature_ecx_include;
	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
		add_x86_feature(featureset, X86FSET_LARGEPAGE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
		add_x86_feature(featureset, X86FSET_TSC);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
		add_x86_feature(featureset, X86FSET_MSR);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
		add_x86_feature(featureset, X86FSET_MTRR);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
		add_x86_feature(featureset, X86FSET_PGE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
		add_x86_feature(featureset, X86FSET_CMOV);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
		add_x86_feature(featureset, X86FSET_MMX);
	}
	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
		add_x86_feature(featureset, X86FSET_MCA);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
		add_x86_feature(featureset, X86FSET_PAE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
		add_x86_feature(featureset, X86FSET_CX8);
	}
	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
		add_x86_feature(featureset, X86FSET_CX16);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
		add_x86_feature(featureset, X86FSET_PAT);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
		add_x86_feature(featureset, X86FSET_SEP);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
		/*
		 * In our implementation, fxsave/fxrstor
		 * are prerequisites before we'll even
		 * try and do SSE things.
		 */
		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
			add_x86_feature(featureset, X86FSET_SSE);
		}
		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
			add_x86_feature(featureset, X86FSET_SSE2);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
			add_x86_feature(featureset, X86FSET_SSE3);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
			add_x86_feature(featureset, X86FSET_SSSE3);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
			add_x86_feature(featureset, X86FSET_SSE4_1);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
			add_x86_feature(featureset, X86FSET_SSE4_2);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
			add_x86_feature(featureset, X86FSET_AES);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
		}

		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
			add_x86_feature(featureset, X86FSET_XSAVE);

			/* We only test AVX when there is XSAVE */
			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
				add_x86_feature(featureset,
				    X86FSET_AVX);

				/*
				 * Intel says we can't check these without also
				 * checking AVX.
				 */
				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
					add_x86_feature(featureset,
					    X86FSET_F16C);

				if (cp->cp_ecx & CPUID_INTC_ECX_FMA)
					add_x86_feature(featureset,
					    X86FSET_FMA);

				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_BMI1)
					add_x86_feature(featureset,
					    X86FSET_BMI1);
				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_BMI2)
					add_x86_feature(featureset,
					    X86FSET_BMI2);
				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_AVX2)
					add_x86_feature(featureset,
					    X86FSET_AVX2);
			}
		}
	}
	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
		add_x86_feature(featureset, X86FSET_X2APIC);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
		add_x86_feature(featureset, X86FSET_DE);
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
		/*
		 * We require the CLFLUSH instruction for erratum workaround
		 * to use MONITOR/MWAIT.
		 */
		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
			add_x86_feature(featureset, X86FSET_MWAIT);
		} else {
			extern int idle_cpu_assert_cflush_monitor;

			/*
			 * All processors we are aware of which have
			 * MONITOR/MWAIT also have CLFLUSH.
			 */
			if (idle_cpu_assert_cflush_monitor) {
				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
			}
		}
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
		add_x86_feature(featureset, X86FSET_VMX);
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
		add_x86_feature(featureset, X86FSET_RDRAND);

	/*
	 * Only need it the first time; the rest of the cpus would follow
	 * suit.  We only capture this for the bootcpu.
	 */
	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
		add_x86_feature(featureset, X86FSET_CLFSH);
		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
	}
	if (is_x86_feature(featureset, X86FSET_PAE))
		cpi->cpi_pabits = 36;

	/*
	 * Hyperthreading configuration is slightly tricky on Intel
	 * and pure clones, and even trickier on AMD.
	 *
	 * (AMD chose to set the HTT bit on their CMP processors,
	 * even though they're not actually hyperthreaded.  Thus it
	 * takes a bit more work to figure out what's really going
	 * on ... see the handling of the CMP_LGCY bit below)
	 */
	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
		if (cpi->cpi_ncpu_per_chip > 1)
			add_x86_feature(featureset, X86FSET_HTT);
	} else {
		cpi->cpi_ncpu_per_chip = 1;
	}
	/*
	 * Work on the "extended" feature information, doing
	 * some basic initialization for cpuid_pass2()
	 */
	xcpuid = 0;
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
			xcpuid++;
		break;
	case X86_VENDOR_AMD:
		if (cpi->cpi_family > 5 ||
		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
			xcpuid++;
		break;
	case X86_VENDOR_Cyrix:
		/*
		 * Only these Cyrix CPUs are -known- to support
		 * extended cpuid operations.
		 */
		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
		    x86_type == X86_TYPE_CYRIX_GXm)
			xcpuid++;
		break;
	case X86_VENDOR_Centaur:
	case X86_VENDOR_TM:
	default:
		xcpuid++;
		break;
	}

	if (xcpuid) {
		cp = &cpi->cpi_extd[0];
		cp->cp_eax = 0x80000000;
		cpi->cpi_xmaxeax = __cpuid_insn(cp);
	}

	if (cpi->cpi_xmaxeax & 0x80000000) {

		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000001)
				break;
			cp = &cpi->cpi_extd[1];
			cp->cp_eax = 0x80000001;
			(void) __cpuid_insn(cp);

			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
			    cpi->cpi_family == 5 &&
			    cpi->cpi_model == 6 &&
			    cpi->cpi_step == 6) {
				/*
				 * K6 model 6 uses bit 10 to indicate SYSC
				 * Later models use bit 11. Fix it here.
				 */
				if (cp->cp_edx & 0x400) {
					cp->cp_edx &= ~0x400;
					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
				}
			}

			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);

			/*
			 * Compute the additions to the kernel's feature word.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
				add_x86_feature(featureset, X86FSET_NX);
			}

			/*
			 * Regardless whether or not we boot 64-bit,
			 * we should have a way to identify whether
			 * the CPU is capable of running 64-bit.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
				add_x86_feature(featureset, X86FSET_64);
			}

#if defined(__amd64)
			/* 1 GB large page - enable only for 64 bit kernel */
			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
				add_x86_feature(featureset, X86FSET_1GPG);
			}
#endif

			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
				add_x86_feature(featureset, X86FSET_SSE4A);
			}

			/*
			 * If both the HTT and CMP_LGCY bits are set,
			 * then we're not actually HyperThreaded.  Read
			 * "AMD CPUID Specification" for more details.
			 */
			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
			    is_x86_feature(featureset, X86FSET_HTT) &&
			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
				remove_x86_feature(featureset, X86FSET_HTT);
				add_x86_feature(featureset, X86FSET_CMP);
			}
#if defined(__amd64)
			/*
			 * It's really tricky to support syscall/sysret in
			 * the i386 kernel; we rely on sysenter/sysexit
			 * instead.  In the amd64 kernel, things are -way-
			 * better.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
				add_x86_feature(featureset, X86FSET_ASYSC);
			}

			/*
			 * While we're thinking about system calls, note
			 * that AMD processors don't support sysenter
			 * in long mode at all, so don't try to program them.
			 */
			if (x86_vendor == X86_VENDOR_AMD) {
				remove_x86_feature(featureset, X86FSET_SEP);
			}
#endif
			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
				add_x86_feature(featureset, X86FSET_TSCP);
			}

			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
				add_x86_feature(featureset, X86FSET_SVM);
			}

			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
				add_x86_feature(featureset, X86FSET_TOPOEXT);
			}
			break;
		default:
			break;
		}

		/*
		 * Get CPUID data about processor cores and hyperthreads.
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax >= 4) {
				cp = &cpi->cpi_std[4];
				cp->cp_eax = 4;
				cp->cp_ecx = 0;
				(void) __cpuid_insn(cp);
				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
			}
			/*FALLTHROUGH*/
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000008)
				break;
			cp = &cpi->cpi_extd[8];
			cp->cp_eax = 0x80000008;
			(void) __cpuid_insn(cp);
			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);

			/*
			 * Virtual and physical address limits from
			 * cpuid override previously guessed values.
			 */
			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
			break;
		default:
			break;
		}

		/*
		 * Derive the number of cores per chip
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax < 4) {
				cpi->cpi_ncore_per_chip = 1;
				break;
			} else {
				cpi->cpi_ncore_per_chip =
				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
			}
			break;
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000008) {
				cpi->cpi_ncore_per_chip = 1;
				break;
			} else {
				/*
				 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
				 * 1 less than the number of physical cores on
				 * the chip.  In family 0x10 this value can
				 * be affected by "downcoring" - it reflects
				 * 1 less than the number of cores actually
				 * enabled on this node.
				 */
				cpi->cpi_ncore_per_chip =
				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
			}
			break;
		default:
			cpi->cpi_ncore_per_chip = 1;
			break;
		}

		/*
		 * Get CPUID data about TSC Invariance in Deep C-State.
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax >= 7) {
				cp = &cpi->cpi_extd[7];
				cp->cp_eax = 0x80000007;
				cp->cp_ecx = 0;
				(void) __cpuid_insn(cp);
			}
			break;
		default:
			break;
		}
	} else {
		cpi->cpi_ncore_per_chip = 1;
	}

	/*
	 * If more than one core, then this processor is CMP.
	 */
	if (cpi->cpi_ncore_per_chip > 1) {
		add_x86_feature(featureset, X86FSET_CMP);
	}

	/*
	 * If the number of cores is the same as the number
	 * of CPUs, then we cannot have HyperThreading.
	 */
	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
		remove_x86_feature(featureset, X86FSET_HTT);
	}

	cpi->cpi_apicid = CPI_APIC_ID(cpi);
	cpi->cpi_procnodes_per_pkg = 1;
	cpi->cpi_cores_per_compunit = 1;
	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
		/*
		 * Single-core single-threaded processors.
		 */
		cpi->cpi_chipid = -1;
		cpi->cpi_clogid = 0;
		cpi->cpi_coreid = cpu->cpu_id;
		cpi->cpi_pkgcoreid = 0;
		if (cpi->cpi_vendor == X86_VENDOR_AMD)
			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
		else
			cpi->cpi_procnodeid = cpi->cpi_chipid;
	} else if (cpi->cpi_ncpu_per_chip > 1) {
		if (cpi->cpi_vendor == X86_VENDOR_Intel)
			cpuid_intel_getids(cpu, featureset);
		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
			cpuid_amd_getids(cpu);
		else {
			/*
			 * All other processors are currently
			 * assumed to have single cores.
			 */
			cpi->cpi_coreid = cpi->cpi_chipid;
			cpi->cpi_pkgcoreid = 0;
			cpi->cpi_procnodeid = cpi->cpi_chipid;
			cpi->cpi_compunitid = cpi->cpi_chipid;
		}
	}

	/*
	 * Synthesize chip "revision" and socket type
	 */
	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);

pass1_done:
	cpi->cpi_pass = 1;
}
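/*
 * To illustrate the mask logic in cpuid_pass1() (scenario hypothetical):
 * on a pre-Pentium-4 Intel part, mask_ecx stays 0, so even if hardware
 * were to set CPUID_INTC_ECX_SSE3 the later `cp->cp_ecx &= mask_ecx'
 * clears it and X86FSET_SSE3 is never advertised.
 */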
/*
 * Make copies of the cpuid table entries we depend on, in
 * part for ease of parsing now, in part so that we have only
 * one place to correct any of it, in part for ease of
 * later export to userland, and in part so we can look at
 * this stuff in a crash dump.
 */

void
cpuid_pass2(cpu_t *cpu)
{
	uint_t n, nmax;
	int i;
	struct cpuid_regs *cp;
	uint8_t *dp;
	uint32_t *iptr;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 1);

	if (cpi->cpi_maxeax < 1)
		goto pass2_done;

	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
		nmax = NMAX_CPI_STD;
	/*
	 * (We already handled n == 0 and n == 1 in pass 1)
	 */
	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
		cp->cp_eax = n;

		/*
		 * CPUID function 4 expects %ecx to be initialized
		 * with an index which indicates which cache to return
		 * information about. The OS is expected to call function 4
		 * with %ecx set to 0, 1, 2, ... until it returns with
		 * EAX[4:0] set to 0, which indicates there are no more
		 * caches.
		 *
		 * Here, populate cpi_std[4] with the information returned by
		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
		 * when dynamic memory allocation becomes available.
		 *
		 * Note: we need to explicitly initialize %ecx here, since
		 * function 4 may have been previously invoked.
		 */
		if (n == 4)
			cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);
		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
		switch (n) {
		case 2:
			/*
			 * "the lower 8 bits of the %eax register
			 * contain a value that identifies the number
			 * of times the cpuid [instruction] has to be
			 * executed to obtain a complete image of the
			 * processor's caching systems."
			 *
			 * How *do* they make this stuff up?
			 */
			cpi->cpi_ncache = sizeof (*cp) *
			    BITX(cp->cp_eax, 7, 0);
			if (cpi->cpi_ncache == 0)
				break;
			cpi->cpi_ncache--;	/* skip count byte */

			/*
			 * Well, for now, rather than attempt to implement
			 * this slightly dubious algorithm, we just look
			 * at the first 15 ..
			 */
			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
				cpi->cpi_ncache = sizeof (*cp) - 1;

			dp = cpi->cpi_cacheinfo;
			if (BITX(cp->cp_eax, 31, 31) == 0) {
				uint8_t *p = (void *)&cp->cp_eax;
				for (i = 1; i < 4; i++)
					if (p[i] != 0)
						*dp++ = p[i];
			}
			if (BITX(cp->cp_ebx, 31, 31) == 0) {
				uint8_t *p = (void *)&cp->cp_ebx;
				for (i = 0; i < 4; i++)
					if (p[i] != 0)
						*dp++ = p[i];
			}
			if (BITX(cp->cp_ecx, 31, 31) == 0) {
				uint8_t *p = (void *)&cp->cp_ecx;
				for (i = 0; i < 4; i++)
					if (p[i] != 0)
						*dp++ = p[i];
			}
			if (BITX(cp->cp_edx, 31, 31) == 0) {
				uint8_t *p = (void *)&cp->cp_edx;
				for (i = 0; i < 4; i++)
					if (p[i] != 0)
						*dp++ = p[i];
			}
			break;
		case 3:	/* Processor serial number, if PSN supported */
			break;

		case 4:	/* Deterministic cache parameters */
			break;

		case 5:	/* Monitor/Mwait parameters */
		{
			size_t mwait_size;

			/*
			 * check cpi_mwait.support which was set in cpuid_pass1
			 */
			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
				break;

			/*
			 * Protect ourself from insane mwait line size.
			 * Workaround for incomplete hardware emulator(s).
			 */
			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
			if (mwait_size < sizeof (uint32_t) ||
			    !ISP2(mwait_size)) {
#if DEBUG
				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
				    "size %ld", cpu->cpu_id, (long)mwait_size);
#endif
				break;
			}

			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
			cpi->cpi_mwait.mon_max = mwait_size;
			if (MWAIT_EXTENSION(cpi)) {
				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
				if (MWAIT_INT_ENABLE(cpi))
					cpi->cpi_mwait.support |=
					    MWAIT_ECX_INT_ENABLE;
			}
			break;
		}
		default:
			break;
		}
	}

	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
		struct cpuid_regs regs;

		cp = &regs;
		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);

		/*
		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint32_t x2apic_id;
			uint_t coreid_shift = 0;
			uint_t ncpu_per_core = 1;
			uint_t chipid_shift = 0;
			uint_t ncpu_per_chip = 1;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					x2apic_id = cp->cp_edx;
					coreid_shift = BITX(cp->cp_eax, 4, 0);
					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
				} else if (level == 2) {
					x2apic_id = cp->cp_edx;
					chipid_shift = BITX(cp->cp_eax, 4, 0);
					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
				}
			}

			cpi->cpi_apicid = x2apic_id;
			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
			cpi->cpi_ncore_per_chip = ncpu_per_chip /
			    ncpu_per_core;
			cpi->cpi_chipid = x2apic_id >> chipid_shift;
			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
			cpi->cpi_coreid = x2apic_id >> coreid_shift;
			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
		}

		/* Make cp NULL so that we don't stumble on others */
		cp = NULL;
	}
	/*
	 * XSAVE enumeration
	 */
	if (cpi->cpi_maxeax >= 0xD) {
		struct cpuid_regs regs;
		boolean_t cpuid_d_valid = B_TRUE;

		cp = &regs;
		cp->cp_eax = 0xD;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);

		/*
		 * Sanity checks for debug
		 */
		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
		    (cp->cp_eax & XFEATURE_SSE) == 0) {
			cpuid_d_valid = B_FALSE;
		}

		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;

		/*
		 * If the hw supports AVX, get the size and offset in the save
		 * area for the ymm state.
		 */
		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
			cp->cp_eax = 0xD;
			cp->cp_ecx = 2;
			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
				cpuid_d_valid = B_FALSE;
			}

			cpi->cpi_xsave.ymm_size = cp->cp_eax;
			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
		}

		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
			xsave_state_size = 0;
		} else if (cpuid_d_valid) {
			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
		} else {
			/* Broken CPUID 0xD, probably in HVM */
			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
			    ", ymm_size = %d, ymm_offset = %d\n",
			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
			    cpi->cpi_xsave.xsav_hw_features_high,
			    (int)cpi->cpi_xsave.xsav_max_size,
			    (int)cpi->cpi_xsave.ymm_size,
			    (int)cpi->cpi_xsave.ymm_offset);

			if (xsave_state_size != 0) {
				/*
				 * This must be a non-boot CPU. We cannot
				 * continue, because boot cpu has already
				 * enabled XSAVE.
				 */
				ASSERT(cpu->cpu_id != 0);
				cmn_err(CE_PANIC, "cpu%d: we have already "
				    "enabled XSAVE on boot cpu, cannot "
				    "continue.", cpu->cpu_id);
			} else {
				/*
				 * If we reached here on the boot CPU, it's also
				 * almost certain that we'll reach here on the
				 * non-boot CPUs. When we're here on a boot CPU
				 * we should disable the feature, on a non-boot
				 * CPU we need to confirm that we have.
				 */
				if (cpu->cpu_id == 0) {
					remove_x86_feature(x86_featureset,
					    X86FSET_XSAVE);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX);
					remove_x86_feature(x86_featureset,
					    X86FSET_F16C);
					remove_x86_feature(x86_featureset,
					    X86FSET_FMA);
					remove_x86_feature(x86_featureset,
					    X86FSET_BMI1);
					remove_x86_feature(x86_featureset,
					    X86FSET_BMI2);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX2);
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_XSAVE;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_AVX;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_F16C;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_FMA;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI1;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI2;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_AVX2;
					xsave_force_disable = B_TRUE;
				} else {
					VERIFY(is_x86_feature(x86_featureset,
					    X86FSET_XSAVE) == B_FALSE);
				}
			}
		}
	}
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
		goto pass2_done;

	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
		nmax = NMAX_CPI_EXTD;
	/*
	 * Copy the extended properties, fixing them as we go.
	 * (We already handled n == 0 and n == 1 in pass 1)
	 */
	iptr = (void *)cpi->cpi_brandstr;
	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
		cp->cp_eax = 0x80000000 + n;
		(void) __cpuid_insn(cp);
		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
		switch (n) {
		case 2:
		case 3:
		case 4:
			/*
			 * Extract the brand string
			 */
			*iptr++ = cp->cp_eax;
			*iptr++ = cp->cp_ebx;
			*iptr++ = cp->cp_ecx;
			*iptr++ = cp->cp_edx;
			break;
		case 5:
			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * parts to report the sizes of the
				 * TLB for large pages. Before then,
				 * we don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    (cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1))
					cp->cp_eax = 0;
				break;
			default:
				break;
			}
			break;
		case 6:
			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * AMD parts with L2 TLB's.
				 * Before then, don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1)
					cp->cp_eax = cp->cp_ebx = 0;
				/*
				 * AMD Duron rev A0 reports L2
				 * cache size incorrectly as 1K
				 * when it is really 64K
				 */
				if (cpi->cpi_family == 6 &&
				    cpi->cpi_model == 3 &&
				    cpi->cpi_step == 0) {
					cp->cp_ecx &= 0xffff;
					cp->cp_ecx |= 0x400000;
				}
				break;
			case X86_VENDOR_Cyrix:	/* VIA C3 */
				/*
				 * VIA C3 processors are a bit messed
				 * up w.r.t. encoding cache sizes in %ecx
				 */
				if (cpi->cpi_family != 6)
					break;
				/*
				 * model 7 and 8 were incorrectly encoded
				 *
				 * xxx is model 8 really broken?
				 */
				if (cpi->cpi_model == 7 ||
				    cpi->cpi_model == 8)
					cp->cp_ecx =
					    BITX(cp->cp_ecx, 31, 24) << 16 |
					    BITX(cp->cp_ecx, 23, 16) << 12 |
					    BITX(cp->cp_ecx, 15, 8) << 8 |
					    BITX(cp->cp_ecx, 7, 0);
				/*
				 * model 9 stepping 1 has wrong associativity
				 */
				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
					cp->cp_ecx |= 8 << 12;
				break;
			case X86_VENDOR_Intel:
				/*
				 * Extended L2 Cache features function.
				 * First appeared on Prescott.
				 */
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

pass2_done:
	cpi->cpi_pass = 2;
}
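/*
 * A note on the brand string extraction above: functions 0x80000002..4
 * each return 16 bytes in %eax..%edx, which is why cpi_brandstr is sized
 * at 49 bytes - 48 characters of brand string plus a terminating NUL.
 */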
static const char *
intel_cpubrand(const struct cpuid_info *cpi)
{
	int i;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
		return ("i486");

	switch (cpi->cpi_family) {
	case 5:
		return ("Intel Pentium(r)");
	case 6:
		switch (cpi->cpi_model) {
			uint_t celeron, xeon;
			const struct cpuid_regs *cp;
		case 0:
		case 1:
		case 2:
			return ("Intel Pentium(r) Pro");
		case 3:
		case 4:
			return ("Intel Pentium(r) II");
		case 6:
			return ("Intel Celeron(r)");
		case 5:
		case 7:
			celeron = xeon = 0;
			cp = &cpi->cpi_std[2];	/* cache info */

			for (i = 1; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 2; i++) {
				uint_t tmp;

				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			if (celeron)
				return ("Intel Celeron(r)");
			if (xeon)
				return (cpi->cpi_model == 5 ?
				    "Intel Pentium(r) II Xeon(tm)" :
				    "Intel Pentium(r) III Xeon(tm)");
			return (cpi->cpi_model == 5 ?
			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
		default:
			break;
		}
	default:
		break;
	}

	/* BrandID is present if the field is nonzero */
	if (cpi->cpi_brandid != 0) {
		static const struct {
			uint_t bt_bid;
			const char *bt_str;
		} brand_tbl[] = {
			{ 0x1,	"Intel(r) Celeron(r)" },
			{ 0x2,	"Intel(r) Pentium(r) III" },
			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
			{ 0x4,	"Intel(r) Pentium(r) III" },
			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
			{ 0x8,	"Intel(r) Pentium(r) 4" },
			{ 0x9,	"Intel(r) Pentium(r) 4" },
			{ 0xa,	"Intel(r) Celeron(r)" },
			{ 0xb,	"Intel(r) Xeon(tm)" },
			{ 0xc,	"Intel(r) Xeon(tm) MP" },
			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
			{ 0x11, "Mobile Genuine Intel(r)" },
			{ 0x12, "Intel(r) Celeron(r) M" },
			{ 0x13, "Mobile Intel(r) Celeron(r)" },
			{ 0x14, "Intel(r) Celeron(r)" },
			{ 0x15, "Mobile Genuine Intel(r)" },
			{ 0x16, "Intel(r) Pentium(r) M" },
			{ 0x17, "Mobile Intel(r) Celeron(r)" }
		};
		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
		uint_t sgn;

		sgn = (cpi->cpi_family << 8) |
		    (cpi->cpi_model << 4) | cpi->cpi_step;

		for (i = 0; i < btblmax; i++)
			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
				break;
		if (i < btblmax) {
			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
				return ("Intel(r) Celeron(r)");
			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
				return ("Intel(r) Xeon(tm) MP");
			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
				return ("Intel(r) Xeon(tm)");
			return (brand_tbl[i].bt_str);
		}
	}

	return (NULL);
}
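/*
 * Illustrative example (not in the original source): for a family 6,
 * model 0xb, stepping 1 part, sgn == (6 << 8) | (0xb << 4) | 1 == 0x6b1,
 * so a brand id of 3 on that signature is reported as a Celeron rather
 * than the table's "Pentium(r) III Xeon(tm)".
 */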
static const char *
amd_cpubrand(const struct cpuid_info *cpi)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
		return ("i486 compatible");

	switch (cpi->cpi_family) {
	case 5:
		switch (cpi->cpi_model) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
			return ("AMD-K5(r)");
		case 6:
		case 7:
			return ("AMD-K6(r)");
		case 8:
			return ("AMD-K6(r)-2");
		case 9:
			return ("AMD-K6(r)-III");
		default:
			return ("AMD (family 5)");
		}
	case 6:
		switch (cpi->cpi_model) {
		case 1:
			return ("AMD-K7(tm)");
		case 0:
		case 2:
		case 4:
			return ("AMD Athlon(tm)");
		case 3:
		case 7:
			return ("AMD Duron(tm)");
		case 6:
		case 8:
		case 10:
			/*
			 * Use the L2 cache size to distinguish
			 */
			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
			    "AMD Athlon(tm)" : "AMD Duron(tm)");
		default:
			return ("AMD (family 6)");
		}
	default:
		break;
	}

	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
	    cpi->cpi_brandid != 0) {
		switch (BITX(cpi->cpi_brandid, 7, 5)) {
		case 3:
			return ("AMD Opteron(tm) UP 1xx");
		case 4:
			return ("AMD Opteron(tm) DP 2xx");
		case 5:
			return ("AMD Opteron(tm) MP 8xx");
		default:
			return ("AMD Opteron(tm)");
		}
	}

	return (NULL);
}
static const char *
cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
	    type == X86_TYPE_CYRIX_486)
		return ("i486 compatible");

	switch (type) {
	case X86_TYPE_CYRIX_6x86:
		return ("Cyrix 6x86");
	case X86_TYPE_CYRIX_6x86L:
		return ("Cyrix 6x86L");
	case X86_TYPE_CYRIX_6x86MX:
		return ("Cyrix 6x86MX");
	case X86_TYPE_CYRIX_GXm:
		return ("Cyrix GXm");
	case X86_TYPE_CYRIX_MediaGX:
		return ("Cyrix MediaGX");
	case X86_TYPE_CYRIX_MII:
		return ("Cyrix M2");
	case X86_TYPE_VIA_CYRIX_III:
		return ("VIA Cyrix M3");
	default:
		/*
		 * Have another wild guess ..
		 */
		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
			return ("Cyrix 5x86");
		else if (cpi->cpi_family == 5) {
			switch (cpi->cpi_model) {
			case 2:
				return ("Cyrix 6x86");	/* Cyrix M1 */
			case 4:
				return ("Cyrix MediaGX");
			default:
				break;
			}
		} else if (cpi->cpi_family == 6) {
			switch (cpi->cpi_model) {
			case 0:
				return ("Cyrix 6x86MX"); /* Cyrix M2? */
			default:
				break;
			}
		}
		break;
	}
	return (NULL);
}
/*
 * This only gets called in the case that the CPU extended
 * feature brand strings (0x80000002, 0x80000003, 0x80000004)
 * aren't available, or contain null bytes for some reason.
 */
static void
fabricate_brandstr(struct cpuid_info *cpi)
{
	const char *brand = NULL;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		brand = intel_cpubrand(cpi);
		break;
	case X86_VENDOR_AMD:
		brand = amd_cpubrand(cpi);
		break;
	case X86_VENDOR_Cyrix:
		brand = cyrix_cpubrand(cpi, x86_type);
		break;
	case X86_VENDOR_NexGen:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "NexGen Nx586";
		break;
	case X86_VENDOR_Centaur:
		if (cpi->cpi_family == 5)
			switch (cpi->cpi_model) {
			case 4:
				brand = "Centaur C6";
				break;
			case 8:
				brand = "Centaur C2";
				break;
			case 9:
				brand = "Centaur C3";
				break;
			default:
				break;
			}
		break;
	case X86_VENDOR_Rise:
		if (cpi->cpi_family == 5 &&
		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
			brand = "Rise mP6";
		break;
	case X86_VENDOR_SiS:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "SiS 55x";
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
			brand = "Transmeta Crusoe TM3x00 or TM5x00";
		break;
	case X86_VENDOR_NSC:
	case X86_VENDOR_UMC:
	default:
		break;
	}
	if (brand) {
		(void) strcpy((char *)cpi->cpi_brandstr, brand);
		return;
	}

	/*
	 * If all else fails ...
	 */
	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
}
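/*
 * Illustrative example (assumed values): a vendor string of "AuthenticAMD"
 * with family 15, model 4, step 8 would fabricate the fallback brand
 * string "AuthenticAMD 15.4.8".
 */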
/*
 * This routine is called just after kernel memory allocation
 * becomes available on cpu0, and as part of mp_startup() on
 * the other cpus.
 *
 * Fixup the brand string, and collect any information from cpuid
 * that requires dynamically allocated storage to represent.
 */
void
cpuid_pass3(cpu_t *cpu)
{
	int	i, max, shft, level, size;
	struct cpuid_regs regs;
	struct cpuid_regs *cp;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 2);

	/*
	 * Function 4: Deterministic cache parameters
	 *
	 * Take this opportunity to detect the number of threads
	 * sharing the last level cache, and construct a corresponding
	 * cache id. The respective cpuid_info members are initialized
	 * to the default case of "no last level cache sharing".
	 */
	cpi->cpi_ncpu_shr_last_cache = 1;
	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;

	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {

		/*
		 * Find the # of elements (size) returned by fn 4, and along
		 * the way detect last level cache sharing details.
		 */
		bzero(&regs, sizeof (regs));
		cp = &regs;
		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
			cp->cp_eax = 4;
			cp->cp_ecx = i;

			(void) __cpuid_insn(cp);

			if (CPI_CACHE_TYPE(cp) == 0)
				break;
			level = CPI_CACHE_LVL(cp);
			if (level > max) {
				max = level;
				cpi->cpi_ncpu_shr_last_cache =
				    CPI_NTHR_SHR_CACHE(cp) + 1;
			}
		}
		cpi->cpi_std_4_size = size = i;

		/*
		 * Allocate the cpi_std_4 array. The first element
		 * references the regs for fn 4, %ecx == 0, which
		 * cpuid_pass2() stashed in cpi->cpi_std[4].
		 */
		if (size > 0) {
			cpi->cpi_std_4 =
			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
			cpi->cpi_std_4[0] = &cpi->cpi_std[4];

			/*
			 * Allocate storage to hold the additional regs
			 * for function 4, %ecx == 1 .. cpi_std_4_size.
			 *
			 * The regs for fn 4, %ecx == 0 has already
			 * been allocated as indicated above.
			 */
			for (i = 1; i < size; i++) {
				cp = cpi->cpi_std_4[i] =
				    kmem_zalloc(sizeof (regs), KM_SLEEP);
				cp->cp_eax = 4;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
			}
		}
		/*
		 * Determine the number of bits needed to represent
		 * the number of CPUs sharing the last level cache.
		 *
		 * Shift off that number of bits from the APIC id to
		 * derive the cache id.
		 */
		shft = 0;
		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
			shft++;
		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
	}

	/*
	 * Now fixup the brand string
	 */
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
		fabricate_brandstr(cpi);
	} else {

		/*
		 * If we successfully extracted a brand string from the cpuid
		 * instruction, clean it up by removing leading spaces and
		 * similar junk.
		 */
		if (cpi->cpi_brandstr[0]) {
			size_t maxlen = sizeof (cpi->cpi_brandstr);
			char *src, *dst;

			dst = src = (char *)cpi->cpi_brandstr;
			src[maxlen - 1] = '\0';
			/*
			 * strip leading spaces
			 */
			while (*src == ' ')
				src++;
			/*
			 * Remove any 'Genuine' or "Authentic" prefixes
			 */
			if (strncmp(src, "Genuine ", 8) == 0)
				src += 8;
			if (strncmp(src, "Authentic ", 10) == 0)
				src += 10;

			/*
			 * Now do an in-place copy.
			 * Map (R) to (r) and (TM) to (tm).
			 * The era of teletypes is long gone, and there's
			 * -really- no need to shout.
			 */
			while (*src != '\0') {
				if (src[0] == '(') {
					if (strncmp(src + 1, "R)", 2) == 0) {
						(void) strncpy(dst, "(r)", 3);
						src += 3;
						dst += 3;
						continue;
					}
					if (strncmp(src + 1, "TM)", 3) == 0) {
						(void) strncpy(dst, "(tm)", 4);
						src += 4;
						dst += 4;
						continue;
					}
				}
				*dst++ = *src++;
			}
			*dst = '\0';

			/*
			 * Finally, remove any trailing spaces
			 */
			while (--dst > cpi->cpi_brandstr)
				if (*dst == ' ')
					*dst = '\0';
				else
					break;
		} else
			fabricate_brandstr(cpi);
	}
	cpi->cpi_pass = 3;
}
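/*
 * Worked example (illustrative): with 8 hardware threads sharing the last
 * level cache, the loop above computes shft == 3 (since 1 << 3 >= 8), so
 * an APIC id of 0x15 yields cache id 0x15 >> 3 == 0x2; all threads on the
 * same last level cache end up with the same id.
 */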
/*
 * This routine is called out of bind_hwcap() much later in the life
 * of the kernel (post_startup()). The job of this routine is to resolve
 * the hardware feature support and kernel support for those features into
 * what we're actually going to tell applications via the aux vector.
 */
void
cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
{
	struct cpuid_info *cpi;
	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 3);

	if (cpi->cpi_maxeax >= 1) {
		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
		uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES];

		*edx = CPI_FEATURES_EDX(cpi);
		*ecx = CPI_FEATURES_ECX(cpi);
		*ebx = CPI_FEATURES_7_0_EBX(cpi);

		/*
		 * [these require explicit kernel support]
		 */
		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
			*edx &= ~CPUID_INTC_EDX_SEP;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
			*edx &= ~CPUID_INTC_EDX_SSE2;

		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
			*edx &= ~CPUID_INTC_EDX_HTT;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
			*ecx &= ~CPUID_INTC_ECX_SSE3;

		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
			*ecx &= ~CPUID_INTC_ECX_SSSE3;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
		if (!is_x86_feature(x86_featureset, X86FSET_AES))
			*ecx &= ~CPUID_INTC_ECX_AES;
		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
			    CPUID_INTC_ECX_OSXSAVE);
		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
			*ecx &= ~CPUID_INTC_ECX_AVX;
		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
			*ecx &= ~CPUID_INTC_ECX_F16C;
		if (!is_x86_feature(x86_featureset, X86FSET_FMA))
			*ecx &= ~CPUID_INTC_ECX_FMA;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI1))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI2))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
		if (!is_x86_feature(x86_featureset, X86FSET_AVX2))
			*ebx &= ~CPUID_INTC_EBX_7_0_AVX2;

		/*
		 * [no explicit support required beyond x87 fp context]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);

		/*
		 * Now map the supported feature vector to things that we
		 * think userland will care about.
		 */
		if (*edx & CPUID_INTC_EDX_SEP)
			hwcap_flags |= AV_386_SEP;
		if (*edx & CPUID_INTC_EDX_SSE)
			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
		if (*edx & CPUID_INTC_EDX_SSE2)
			hwcap_flags |= AV_386_SSE2;
		if (*ecx & CPUID_INTC_ECX_SSE3)
			hwcap_flags |= AV_386_SSE3;
		if (*ecx & CPUID_INTC_ECX_SSSE3)
			hwcap_flags |= AV_386_SSSE3;
		if (*ecx & CPUID_INTC_ECX_SSE4_1)
			hwcap_flags |= AV_386_SSE4_1;
		if (*ecx & CPUID_INTC_ECX_SSE4_2)
			hwcap_flags |= AV_386_SSE4_2;
		if (*ecx & CPUID_INTC_ECX_MOVBE)
			hwcap_flags |= AV_386_MOVBE;
		if (*ecx & CPUID_INTC_ECX_AES)
			hwcap_flags |= AV_386_AES;
		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
			hwcap_flags |= AV_386_PCLMULQDQ;
		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
			hwcap_flags |= AV_386_XSAVE;

			if (*ecx & CPUID_INTC_ECX_AVX) {
				hwcap_flags |= AV_386_AVX;
				if (*ecx & CPUID_INTC_ECX_F16C)
					hwcap_flags_2 |= AV_386_2_F16C;
				if (*ecx & CPUID_INTC_ECX_FMA)
					hwcap_flags_2 |= AV_386_2_FMA;
				if (*ebx & CPUID_INTC_EBX_7_0_BMI1)
					hwcap_flags_2 |= AV_386_2_BMI1;
				if (*ebx & CPUID_INTC_EBX_7_0_BMI2)
					hwcap_flags_2 |= AV_386_2_BMI2;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX2)
					hwcap_flags_2 |= AV_386_2_AVX2;
			}
		}
		if (*ecx & CPUID_INTC_ECX_VMX)
			hwcap_flags |= AV_386_VMX;
		if (*ecx & CPUID_INTC_ECX_POPCNT)
			hwcap_flags |= AV_386_POPCNT;
		if (*edx & CPUID_INTC_EDX_FPU)
			hwcap_flags |= AV_386_FPU;
		if (*edx & CPUID_INTC_EDX_MMX)
			hwcap_flags |= AV_386_MMX;

		if (*edx & CPUID_INTC_EDX_TSC)
			hwcap_flags |= AV_386_TSC;
		if (*edx & CPUID_INTC_EDX_CX8)
			hwcap_flags |= AV_386_CX8;
		if (*edx & CPUID_INTC_EDX_CMOV)
			hwcap_flags |= AV_386_CMOV;
		if (*ecx & CPUID_INTC_ECX_CX16)
			hwcap_flags |= AV_386_CX16;

		if (*ecx & CPUID_INTC_ECX_RDRAND)
			hwcap_flags_2 |= AV_386_2_RDRAND;
	}
	if (cpi->cpi_xmaxeax < 0x80000001)
		goto pass4_done;

	switch (cpi->cpi_vendor) {
		struct cpuid_regs cp;
		uint32_t *edx, *ecx;

	case X86_VENDOR_Intel:
		/*
		 * Seems like Intel duplicated what was necessary
		 * here to make the initial crop of 64-bit OS's work.
		 * Hopefully, those are the only "extended" bits
		 * they'll add.
		 */
		/*FALLTHROUGH*/

	case X86_VENDOR_AMD:
		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];

		*edx = CPI_FEATURES_XTD_EDX(cpi);
		*ecx = CPI_FEATURES_XTD_ECX(cpi);

		/*
		 * [these features require explicit kernel support]
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			break;

		case X86_VENDOR_AMD:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
				*ecx &= ~CPUID_AMD_ECX_SSE4A;
			break;

		default:
			break;
		}

		/*
		 * [no explicit support required beyond
		 * x87 fp context and exception handlers]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_AMD_EDX_MMXamd |
			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);

		if (!is_x86_feature(x86_featureset, X86FSET_NX))
			*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
		*edx &= ~CPUID_AMD_EDX_LM;
#endif
		/*
		 * Now map the supported feature vector to
		 * things that we think userland will care about.
		 */
#if defined(__amd64)
		if (*edx & CPUID_AMD_EDX_SYSC)
			hwcap_flags |= AV_386_AMD_SYSC;
#endif
		if (*edx & CPUID_AMD_EDX_MMXamd)
			hwcap_flags |= AV_386_AMD_MMX;
		if (*edx & CPUID_AMD_EDX_3DNow)
			hwcap_flags |= AV_386_AMD_3DNow;
		if (*edx & CPUID_AMD_EDX_3DNowx)
			hwcap_flags |= AV_386_AMD_3DNowx;
		if (*ecx & CPUID_AMD_ECX_SVM)
			hwcap_flags |= AV_386_AMD_SVM;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_AMD:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			if (*ecx & CPUID_AMD_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			if (*ecx & CPUID_AMD_ECX_SSE4A)
				hwcap_flags |= AV_386_AMD_SSE4A;
			if (*ecx & CPUID_AMD_ECX_LZCNT)
				hwcap_flags |= AV_386_AMD_LZCNT;
			break;

		case X86_VENDOR_Intel:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			/*
			 * Intel uses a different bit in the same word.
			 */
			if (*ecx & CPUID_INTC_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			break;

		default:
			break;
		}
		break;

	case X86_VENDOR_TM:
		cp.cp_eax = 0x80860001;
		(void) __cpuid_insn(&cp);
		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
		break;

	default:
		break;
	}

pass4_done:
	cpi->cpi_pass = 4;
	if (hwcap_out != NULL) {
		hwcap_out[0] = hwcap_flags;
		hwcap_out[1] = hwcap_flags_2;
	}
}
/*
 * Simulate the cpuid instruction using the data we previously
 * captured about this CPU. We try our best to return the truth
 * about the hardware, independently of kernel support.
 */
uint32_t
cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
{
	struct cpuid_info *cpi;
	struct cpuid_regs *xcp;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 3));

	/*
	 * CPUID data is cached in two separate places: cpi_std for standard
	 * CPUID functions, and cpi_extd for extended CPUID functions.
	 */
	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
		xcp = &cpi->cpi_std[cp->cp_eax];
	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
	else
		/*
		 * The caller is asking for data from an input parameter which
		 * the kernel has not cached. In this case we go fetch from
		 * the hardware and return the data directly to the user.
		 */
		return (__cpuid_insn(cp));

	cp->cp_eax = xcp->cp_eax;
	cp->cp_ebx = xcp->cp_ebx;
	cp->cp_ecx = xcp->cp_ecx;
	cp->cp_edx = xcp->cp_edx;
	return (cp->cp_eax);
}
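/*
 * Usage sketch (illustrative): to read the cached standard leaf 1 for the
 * current CPU one might do
 *
 *	struct cpuid_regs r = { 0 };
 *	r.cp_eax = 1;
 *	(void) cpuid_insn(NULL, &r);
 *
 * Leaves outside the cached cpi_std/cpi_extd ranges fall through to a live
 * __cpuid_insn() on the hardware.
 */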
int
cpuid_checkpass(cpu_t *cpu, int pass)
{
	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
}
int
cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
{
	ASSERT(cpuid_checkpass(cpu, 3));

	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
}
int
cpuid_is_cmt(cpu_t *cpu)
{
	if (cpu == NULL)
		cpu = CPU;

	ASSERT(cpuid_checkpass(cpu, 1));

	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
}
/*
 * AMD and Intel both implement the 64-bit variant of the syscall
 * instruction (syscallq), so if there's -any- support for syscall,
 * cpuid currently says "yes, we support this".
 *
 * However, Intel decided to -not- implement the 32-bit variant of the
 * syscall instruction, so we provide a predicate to allow our caller
 * to test that subtlety here.
 *
 * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
 *	even in the case where the hardware would in fact support it.
 */
int
cpuid_syscall32_insn(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));

	if (cpu == NULL)
		cpu = CPU;

	{
		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
		    cpi->cpi_xmaxeax >= 0x80000001 &&
		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
			return (1);
	}
	return (0);
}
int
cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	static const char fmt[] =
	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
	static const char fmt_ht[] =
	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";

	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
		    cpi->cpi_family, cpi->cpi_model,
		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
	return (snprintf(s, n, fmt,
	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
	    cpi->cpi_family, cpi->cpi_model,
	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
}
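/*
 * Illustrative output (assumed values): a non-CMT GenuineIntel part with
 * cpi_std[1].cp_eax == 0x6f2, family 6, model 15, step 2 at 2333 MHz
 * formats as
 * "x86 (GenuineIntel 6F2 family 6 model 15 step 2 clock 2333 MHz)".
 */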
const char *
cpuid_getvendorstr(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
}

uint_t
cpuid_getvendor(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
}

uint_t
cpuid_getfamily(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_family);
}

uint_t
cpuid_getmodel(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_model);
}

uint_t
cpuid_get_ncpu_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
}

uint_t
cpuid_get_ncore_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
}

uint_t
cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
}

id_t
cpuid_get_last_lvl_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_getstep(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_step);
}

uint_t
cpuid_getsig(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
}

uint32_t
cpuid_getchiprev(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
}

const char *
cpuid_getchiprevstr(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
}

uint32_t
cpuid_getsockettype(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
}

const char *
cpuid_getsocketstr(cpu_t *cpu)
{
	static const char *socketstr = NULL;
	struct cpuid_info *cpi;

	ASSERT(cpuid_checkpass(cpu, 1));
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Assume that socket types are the same across the system */
	if (socketstr == NULL)
		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
		    cpi->cpi_model, cpi->cpi_step);

	return (socketstr);
}

int
cpuid_get_chipid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
	return (cpu->cpu_id);
}

id_t
cpuid_get_coreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
}

int
cpuid_get_pkgcoreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
}

int
cpuid_get_clogid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
}

int
cpuid_get_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_get_procnodeid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
}

uint_t
cpuid_get_procnodes_per_pkg(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
}

uint_t
cpuid_get_compunitid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
}

uint_t
cpuid_get_cores_per_compunit(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
}
/*ARGSUSED*/
int
cpuid_have_cr8access(cpu_t *cpu)
{
#if defined(__amd64)
	return (1);
#else
	struct cpuid_info *cpi;

	ASSERT(cpu != NULL);
	cpi = cpu->cpu_m.mcpu_cpi;
	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
		return (1);
	return (0);
#endif
}
uint32_t
cpuid_get_apicid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
		return (UINT32_MAX);
	} else {
		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
	}
}
void
cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
{
	struct cpuid_info *cpi;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	if (pabits)
		*pabits = cpi->cpi_pabits;
	if (vabits)
		*vabits = cpi->cpi_vabits;
}
/*
 * Returns the number of data TLB entries for a corresponding
 * pagesize.  If it can't be computed, or isn't known, the
 * routine returns zero.  If you ask about an architecturally
 * impossible pagesize, the routine will panic (so that the
 * hat implementor knows that things are inconsistent.)
 */
uint_t
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	/*
	 * Check the L2 TLB info
	 */
	if (cpi->cpi_xmaxeax >= 0x80000006) {
		struct cpuid_regs *cp = &cpi->cpi_extd[6];

		switch (pagesize) {

		case 4 * 1024:
			/*
			 * All zero in the top 16 bits of the register
			 * indicates a unified TLB. Size is in low 16 bits.
			 */
			if ((cp->cp_ebx & 0xffff0000) == 0)
				dtlb_nent = cp->cp_ebx & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
			break;

		case 2 * 1024 * 1024:
			if ((cp->cp_eax & 0xffff0000) == 0)
				dtlb_nent = cp->cp_eax & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_eax, 27, 16);
			break;

		default:
			panic("unknown L2 pagesize");
			/*NOTREACHED*/
		}
	}

	if (dtlb_nent != 0)
		return (dtlb_nent);

	/*
	 * No L2 TLB support for this size, try L1.
	 */
	if (cpi->cpi_xmaxeax >= 0x80000005) {
		struct cpuid_regs *cp = &cpi->cpi_extd[5];

		switch (pagesize) {
		case 4 * 1024:
			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
			break;
		case 2 * 1024 * 1024:
			dtlb_nent = BITX(cp->cp_eax, 23, 16);
			break;
		default:
			panic("unknown L1 d-TLB pagesize");
			/*NOTREACHED*/
		}
	}

	return (dtlb_nent);
}
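/*
 * Worked decode (illustrative): for 4K pages, an extended fn 6 %ebx of
 * 0x00000040 has all-zero top 16 bits, i.e. a unified L2 TLB of 0x40 (64)
 * entries; an %ebx of 0x10400040 instead yields the d-TLB field
 * BITX(0x10400040, 27, 16) == 0x40 entries.
 */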
/*
 * Return 0 if the erratum is not present or not applicable, positive
 * if it is, and negative if the status of the erratum is unknown.
 *
 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
 * Processors" #25759, Rev 3.57, August 2005
 */
int
cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	uint_t eax;

	/*
	 * Bail out if this CPU isn't an AMD CPU, or if it's
	 * a legacy (32-bit) AMD CPU.
	 */
	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
	    cpi->cpi_family == 6)
		return (0);

	eax = cpi->cpi_std[1].cp_eax;

#define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
#define	SH_B3(eax)	(eax == 0xf51)
#define	B(eax)		(SH_B0(eax) || SH_B3(eax))

#define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)

#define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
#define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
#define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
#define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))

#define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
#define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
#define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
#define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))

#define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
#define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
#define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
#define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
#define	BH_E4(eax)	(eax == 0x20fb1)
#define	SH_E5(eax)	(eax == 0x20f42)
#define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
#define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
#define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
			    DH_E6(eax) || JH_E6(eax))

#define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
#define	DR_B0(eax)	(eax == 0x100f20)
#define	DR_B1(eax)	(eax == 0x100f21)
#define	DR_BA(eax)	(eax == 0x100f2a)
#define	DR_B2(eax)	(eax == 0x100f22)
#define	DR_B3(eax)	(eax == 0x100f23)
#define	RB_C0(eax)	(eax == 0x100f40)

	switch (erratum) {
	case 1:
		return (cpi->cpi_family < 0x10);
	case 51:	/* what does the asterisk mean? */
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 52:
		return (B(eax));
	case 57:
		return (cpi->cpi_family <= 0x11);
	case 58:
		return (B(eax));
	case 60:
		return (cpi->cpi_family <= 0x11);
	case 61:
	case 62:
	case 63:
	case 64:
	case 65:
	case 66:
	case 68:
	case 69:
	case 70:
	case 71:
		return (B(eax));
	case 72:
		return (SH_B0(eax));
	case 74:
		return (B(eax));
	case 75:
		return (cpi->cpi_family < 0x10);
	case 76:
		return (B(eax));
	case 77:
		return (cpi->cpi_family <= 0x11);
	case 78:
		return (B(eax) || SH_C0(eax));
	case 79:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 80:
	case 81:
	case 82:
		return (B(eax));
	case 83:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 85:
		return (cpi->cpi_family < 0x10);
	case 86:
		return (SH_C0(eax) || CG(eax));
	case 88:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 89:
		return (cpi->cpi_family < 0x10);
	case 90:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 91:
	case 92:
		return (B(eax) || SH_C0(eax));
	case 93:
		return (SH_C0(eax));
	case 94:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 95:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 96:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 97:
	case 98:
		return (SH_C0(eax) || CG(eax));
	case 99:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 100:
		return (B(eax) || SH_C0(eax));
	case 101:
	case 103:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 104:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 105:
	case 106:
	case 107:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 108:
		return (DH_CG(eax));
	case 109:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 110:
		return (D0(eax) || EX(eax));
	case 111:
		return (CG(eax));
	case 112:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 113:
		return (eax == 0x20fc0);
	case 114:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 115:
		return (SH_E0(eax) || JH_E1(eax));
	case 116:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 117:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 118:
		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
		    JH_E6(eax));
	case 121:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 122:
		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
	case 123:
		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
	case 131:
		return (cpi->cpi_family < 0x10);
	case 6336786:
		/*
		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
		 * if this is a K8 family or newer processor
		 */
		if (CPI_FAMILY(cpi) == 0xf) {
			struct cpuid_regs regs;
			regs.cp_eax = 0x80000007;
			(void) __cpuid_insn(&regs);
			return (!(regs.cp_edx & 0x100));
		}
		return (0);
	case 6323525:
		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
	case 6671130:
		/*
		 * check for processors (pre-Shanghai) that do not provide
		 * optimal management of 1gb ptes in its tlb.
		 */
		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
	case 298:
		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
		    DR_B2(eax) || RB_C0(eax));
	case 721:
#if defined(__amd64)
		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
#else
		return (0);
#endif
	default:
		return (-1);
	}
}
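/*
 * Worked example (illustrative): the cpuid eax signature packs extended
 * family/model and stepping into one word, so eax == 0x20f12 matches
 * JH_E6() above and therefore EX(); the erratum checks reduce to simple
 * equality tests against known silicon revisions.
 */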
/*
 * Determine if specified erratum is present via OSVW (OS Visible Workaround).
 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
 */
int
osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi;
	uint_t osvwid;
	static int osvwfeature = -1;
	uint64_t osvwlength;

	cpi = cpu->cpu_m.mcpu_cpi;

	/* confirm OSVW supported */
	if (osvwfeature == -1) {
		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
	} else {
		/* assert that osvw feature setting is consistent on all cpus */
		ASSERT(osvwfeature ==
		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
	}
	if (!osvwfeature)
		return (-1);

	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;

	switch (erratum) {
	case 298:	/* osvwid is 0 */
		osvwid = 0;
		if (osvwlength <= (uint64_t)osvwid) {
			/* osvwid 0 is unknown */
			return (-1);
		}

		/*
		 * Check the OSVW STATUS MSR to determine the state
		 * of the erratum where:
		 *   0 - fixed by HW
		 *   1 - BIOS has applied the workaround when BIOS
		 *   workaround is available. (Or for other errata,
		 *   OS workaround is required.)
		 * For a value of 1, caller will confirm that the
		 * erratum 298 workaround has indeed been applied by BIOS.
		 *
		 * A 1 may be set in cpus that have a HW fix
		 * in a mixed cpu system. Regarding erratum 298:
		 *   In a multiprocessor platform, the workaround above
		 *   should be applied to all processors regardless of
		 *   silicon revision when an affected processor is
		 *   present.
		 */

		return (rdmsr(MSR_AMD_OSVW_STATUS +
		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));

	default:
		return (-1);
	}
}
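/*
 * Worked example (illustrative): erratum 298 uses osvwid 0, so the status
 * bit is bit (0 % OSVW_ID_CNT_PER_MSR) == 0 of the MSR at
 * MSR_AMD_OSVW_STATUS + (0 / OSVW_ID_CNT_PER_MSR), i.e. the first status
 * MSR; larger osvwids index later MSRs in the same way.
 */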
static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";
static void
add_cache_prop(dev_info_t *devi, const char *label, const char *type,
    uint_t val)
{
	char buf[128];

	/*
	 * ndi_prop_update_int() is used because it is desirable for
	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
	 */
	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
}
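/*
 * Illustrative property names: add_cache_prop(devi, "l2-cache", size_str, v)
 * creates an "l2-cache-size" integer property, and likewise
 * "l2-cache-associativity" and "l2-cache-line-size" for the other types.
 */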
/*
 * Intel-style cache/tlb description
 *
 * Standard cpuid level 2 gives a randomly ordered
 * selection of tags that index into a table that describes
 * cache and tlb properties.
 */

static const char l1_icache_str[] = "l1-icache";
static const char l1_dcache_str[] = "l1-dcache";
static const char l2_cache_str[] = "l2-cache";
static const char l3_cache_str[] = "l3-cache";
static const char itlb4k_str[] = "itlb-4K";
static const char dtlb4k_str[] = "dtlb-4K";
static const char itlb2M_str[] = "itlb-2M";
static const char itlb4M_str[] = "itlb-4M";
static const char dtlb4M_str[] = "dtlb-4M";
static const char dtlb24_str[] = "dtlb0-2M-4M";
static const char itlb424_str[] = "itlb-4K-2M-4M";
static const char itlb24_str[] = "itlb-2M-4M";
static const char dtlb44_str[] = "dtlb-4K-4M";
static const char sl1_dcache_str[] = "sectored-l1-dcache";
static const char sl2_cache_str[] = "sectored-l2-cache";
static const char itrace_str[] = "itrace-cache";
static const char sl3_cache_str[] = "sectored-l3-cache";
static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";

static const struct cachetab {
	uint8_t		ct_code;
	uint8_t		ct_assoc;
	uint16_t	ct_line_size;
	size_t		ct_size;
	const char	*ct_label;
} intel_ctab[] = {
	/*
	 * maintain descending order!
	 *
	 * Codes ignored - Reason
	 * ----------------------
	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
	 * f0H/f1H - Currently we do not interpret prefetch size by design
	 */
	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
	{ 0xc0, 4, 0, 8, dtlb44_str },
	{ 0xba, 4, 0, 64, dtlb4k_str },
	{ 0xb4, 4, 0, 256, dtlb4k_str },
	{ 0xb3, 4, 0, 128, dtlb4k_str },
	{ 0xb2, 4, 0, 64, itlb4k_str },
	{ 0xb0, 4, 0, 128, itlb4k_str },
	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
	{ 0x86, 4, 64, 512*1024, l2_cache_str},
	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
	{ 0x83, 8, 32, 512*1024, l2_cache_str},
	{ 0x82, 8, 32, 256*1024, l2_cache_str},
	{ 0x80, 8, 64, 512*1024, l2_cache_str},
	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
	{ 0x73, 8, 0, 64*1024, itrace_str},
	{ 0x72, 8, 0, 32*1024, itrace_str},
	{ 0x71, 8, 0, 16*1024, itrace_str},
	{ 0x70, 8, 0, 12*1024, itrace_str},
	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
	{ 0x5d, 0, 0, 256, dtlb44_str},
	{ 0x5c, 0, 0, 128, dtlb44_str},
	{ 0x5b, 0, 0, 64, dtlb44_str},
	{ 0x5a, 4, 0, 32, dtlb24_str},
	{ 0x59, 0, 0, 16, dtlb4k_str},
	{ 0x57, 4, 0, 16, dtlb4k_str},
	{ 0x56, 4, 0, 16, dtlb4M_str},
	{ 0x55, 0, 0, 7, itlb24_str},
	{ 0x52, 0, 0, 256, itlb424_str},
	{ 0x51, 0, 0, 128, itlb424_str},
	{ 0x50, 0, 0, 64, itlb424_str},
	{ 0x4f, 0, 0, 32, itlb4k_str},
	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
	{ 0x43, 4, 32, 512*1024, l2_cache_str},
	{ 0x42, 4, 32, 256*1024, l2_cache_str},
	{ 0x41, 4, 32, 128*1024, l2_cache_str},
	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
	{ 0x30, 8, 64, 32*1024, l1_icache_str},
	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0b, 4, 0, 4, itlb4M_str},
	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
	{ 0x08, 4, 32, 16*1024, l1_icache_str},
	{ 0x06, 4, 32, 8*1024, l1_icache_str},
	{ 0x05, 4, 0, 32, dtlb4M_str},
	{ 0x04, 4, 0, 8, dtlb4M_str},
	{ 0x03, 4, 0, 64, dtlb4k_str},
	{ 0x02, 4, 0, 2, itlb4M_str},
	{ 0x01, 4, 0, 32, itlb4k_str},
	{ 0 }
};
static const struct cachetab cyrix_ctab[] = {
	{ 0x70, 4, 0, 32, "tlb-4K" },
	{ 0x80, 4, 16, 16*1024, "l1-cache" },
	{ 0 }
};
/*
 * Search a cache table for a matching entry
 */
static const struct cachetab *
find_cacheent(const struct cachetab *ct, uint_t code)
{
	if (code != 0) {
		for (; ct->ct_code != 0; ct++)
			if (ct->ct_code <= code)
				break;
		if (ct->ct_code == code)
			return (ct);
	}
	return (NULL);
}
/*
 * Populate cachetab entry with L2 or L3 cache-information using
 * cpuid function 4. This function is called from intel_walk_cacheinfo()
 * when descriptor 0x49 is encountered. It returns 0 if no such cache
 * information is found.
 */
static int
intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
{
	uint_t ret = 0;
	uint_t i;
	int level;

	for (i = 0; i < cpi->cpi_std_4_size; i++) {
		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);

		if (level == 2 || level == 3) {
			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
			ct->ct_line_size =
			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
			ct->ct_size = ct->ct_assoc *
			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
			    ct->ct_line_size *
			    (cpi->cpi_std_4[i]->cp_ecx + 1);

			if (level == 2) {
				ct->ct_label = l2_cache_str;
			} else if (level == 3) {
				ct->ct_label = l3_cache_str;
			}
			ret = 1;
		}
	}

	return (ret);
}
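/*
 * Worked example (illustrative): for a leaf 4 entry reporting 16 ways,
 * 1 partition, a 64-byte line and 4096 sets (cp_ecx + 1), the computed
 * size is 16 * 1 * 64 * 4096 == 4MB, labelled l2 or l3 according to the
 * cache level field.
 */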
/*
 * Walk the cacheinfo descriptor, applying 'func' to every valid element
 * The walk is terminated if the walker returns non-zero.
 */
static void
intel_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	struct cachetab des_49_ct, des_b1_ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * For overloaded descriptor 0x49 we use cpuid function 4
		 * if supported by the current processor, to create
		 * cache information.
		 * For overloaded descriptor 0xb1 we use X86_PAE flag
		 * to disambiguate the cache information.
		 */
		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
			ct = &des_49_ct;
		} else if (*dp == 0xb1) {
			des_b1_ct.ct_code = 0xb1;
			des_b1_ct.ct_assoc = 4;
			des_b1_ct.ct_line_size = 0;
			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
				des_b1_ct.ct_size = 8;
				des_b1_ct.ct_label = itlb2M_str;
			} else {
				des_b1_ct.ct_size = 4;
				des_b1_ct.ct_label = itlb4M_str;
			}
			ct = &des_b1_ct;
		} else {
			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
				continue;
			}
		}

		if (func(arg, ct) != 0) {
			break;
		}
	}
}
/*
 * (Like the Intel one, except for Cyrix CPUs)
 */
static void
cyrix_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * Search Cyrix-specific descriptor table first ..
		 */
		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}

		/*
		 * .. else fall back to the Intel one
		 */
		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}
	}
}
/*
 * A cacheinfo walker that adds associativity, line-size, and size properties
 * to the devinfo node it is passed as an argument.
 */
static int
add_cacheent_props(void *arg, const struct cachetab *ct)
{
	dev_info_t *devi = arg;

	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
	if (ct->ct_line_size != 0)
		add_cache_prop(devi, ct->ct_label, line_str,
		    ct->ct_line_size);
	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
	return (0);
}
static const char fully_assoc[] = "fully-associative?";
/*
 * AMD style cache/tlb description
 *
 * Extended functions 5 and 6 directly describe properties of
 * tlbs and various cache levels.
 */
static void
add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* reserved; ignore */
		break;
	default:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 0xff:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	}
}

static void
add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0)
		return;
	add_cache_prop(devi, label, size_str, size);
	add_amd_assoc(devi, label, assoc);
}

static void
add_amd_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || line_size == 0)
		return;
	add_amd_assoc(devi, label, assoc);
	/*
	 * Most AMD parts have a sectored cache. Multiple cache lines are
	 * associated with each tag. A sector consists of all cache lines
	 * associated with a tag. For example, the AMD K6-III has a sector
	 * size of 2 cache lines per tag.
	 */
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}

static void
add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* off */
		break;
	case 1:
	case 2:
	case 4:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 6:
		add_cache_prop(devi, label, assoc_str, 8);
		break;
	case 8:
		add_cache_prop(devi, label, assoc_str, 16);
		break;
	case 0xf:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	default: /* reserved; ignore */
		break;
	}
}
static void
add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0 || assoc == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	add_cache_prop(devi, label, size_str, size);
}

static void
add_amd_l2_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || assoc == 0 || line_size == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}
static void
amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
{
	struct cpuid_regs *cp;

	if (cpi->cpi_xmaxeax < 0x80000005)
		return;
	cp = &cpi->cpi_extd[5];

	/*
	 * 4M/2M L1 TLB configuration
	 *
	 * We report the size for 2M pages because AMD uses two
	 * TLB entries for one 4M page.
	 */
	add_amd_tlb(devi, "dtlb-2M",
	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
	add_amd_tlb(devi, "itlb-2M",
	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));

	/*
	 * 4K L1 TLB configuration
	 */
	switch (cpi->cpi_vendor) {
		uint_t nentries;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5) {
			/*
			 * Crusoe processors have 256 TLB entries, but
			 * cpuid data format constrains them to only
			 * reporting 255 of them.
			 */
			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
				nentries = 256;
			/*
			 * Crusoe processors also have a unified TLB
			 */
			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
			    nentries);
			break;
		}
		/*FALLTHROUGH*/
	default:
		add_amd_tlb(devi, itlb4k_str,
		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
		add_amd_tlb(devi, dtlb4k_str,
		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
		break;
	}

	/*
	 * data L1 cache configuration
	 */
	add_amd_cache(devi, l1_dcache_str,
	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));

	/*
	 * code L1 cache configuration
	 */
	add_amd_cache(devi, l1_icache_str,
	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	/* Check for a unified L2 TLB for large pages */

	if (BITX(cp->cp_eax, 31, 16) == 0)
		add_amd_l2_tlb(devi, "l2-tlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	else {
		add_amd_l2_tlb(devi, "l2-dtlb-2M",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	/* Check for a unified L2 TLB for 4K pages */

	if (BITX(cp->cp_ebx, 31, 16) == 0) {
		add_amd_l2_tlb(devi, "l2-tlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	} else {
		add_amd_l2_tlb(devi, "l2-dtlb-4K",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	add_amd_l2_cache(devi, l2_cache_str,
	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
}
/*
 * There are two basic ways that the x86 world describes its cache
 * and tlb architecture - Intel's way and AMD's way.
 *
 * Return which flavor of cache architecture we should use
 */
static int
x86_which_cacheinfo(struct cpuid_info *cpi)
{
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	case X86_VENDOR_AMD:
		/*
		 * The K5 model 1 was the first part from AMD that reported
		 * cache sizes via extended cpuid functions.
		 */
		if (cpi->cpi_family > 5 ||
		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
			return (X86_VENDOR_AMD);
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5)
			return (X86_VENDOR_AMD);
		/*FALLTHROUGH*/
	default:
		/*
		 * If they have extended CPU data for 0x80000005
		 * then we assume they have AMD-format cache
		 * information.
		 *
		 * If not, and the vendor happens to be Cyrix,
		 * then try our-Cyrix specific handler.
		 *
		 * If we're not Cyrix, then assume we're using Intel's
		 * table-driven format instead.
		 */
		if (cpi->cpi_xmaxeax >= 0x80000005)
			return (X86_VENDOR_AMD);
		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
			return (X86_VENDOR_Cyrix);
		else if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	}
	return (-1);
}
void
cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
    struct cpuid_info *cpi)
{
	dev_info_t *cpu_devi;
	int create;

	cpu_devi = (dev_info_t *)dip;

	/* device_type */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "device_type", "cpu");

	/* reg */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "reg", cpu_id);

	/* cpu-mhz, and clock-frequency */
	if (cpu_freq > 0) {
		long long mul;

		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpu-mhz", cpu_freq);
		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clock-frequency", (int)mul);
	}

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
		return;
	}

	/* vendor-id */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "vendor-id", cpi->cpi_vendorstr);

	if (cpi->cpi_maxeax == 0) {
		return;
	}

	/*
	 * family, model, and step
	 */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "family", CPI_FAMILY(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpu-model", CPI_MODEL(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "stepping-id", CPI_STEP(cpi));

	/* type */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = 1;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "type", CPI_TYPE(cpi));

	/* ext-family */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-family", CPI_FAMILY_XTD(cpi));

	/* ext-model */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_EXTENDED_MODEL_INTEL(cpi);
		break;
	case X86_VENDOR_AMD:
		create = CPI_FAMILY(cpi) == 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-model", CPI_MODEL_XTD(cpi));

	/* generation */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD K5 model 1 was the first part to support this
		 */
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));

	/* brand-id */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * brand id first appeared on Pentium III Xeon model 8,
		 * and Celeron model 8 processors and Opteron
		 */
		create = cpi->cpi_family > 6 ||
		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create && cpi->cpi_brandid != 0) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "brand-id", cpi->cpi_brandid);
	}

	/* chunks, and apic-id */
	switch (cpi->cpi_vendor) {
		/*
		 * first available on Pentium IV and Opteron (K8)
		 */
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "chunks", CPI_CHUNKS(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "apic-id", cpi->cpi_apicid);
		if (cpi->cpi_chipid >= 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "chip#", cpi->cpi_chipid);
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clog#", cpi->cpi_clogid);
		}
	}

	/* cpuid-features */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpuid-features", CPI_FEATURES_EDX(cpi));


	/* cpuid-features-ecx */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));

	/* ext-cpuid-features */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
	case X86_VENDOR_Cyrix:
	case X86_VENDOR_TM:
	case X86_VENDOR_Centaur:
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
	}

	/*
	 * Brand String first appeared in Intel Pentium IV, AMD K5
	 * model 1, and Cyrix GXm. On earlier models we try and
	 * simulate something similar .. so this string should always
	 * say -something- about the processor, however lame.
	 */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "brand-string", cpi->cpi_brandstr);

	/*
	 * Finally, cache and tlb information
	 */
	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_AMD:
		amd_cache_info(cpi, cpu_devi);
		break;
	default:
		break;
	}
}
struct l2info {
	int *l2i_csz;
	int *l2i_lsz;
	int *l2i_assoc;
	int l2i_ret;
};

/*
 * A cacheinfo walker that fetches the size, line-size and associativity
 * of the L2 cache
 */
static int
intel_l2cinfo(void *arg, const struct cachetab *ct)
{
	struct l2info *l2i = arg;
	int *ip;

	if (ct->ct_label != l2_cache_str &&
	    ct->ct_label != sl2_cache_str)
		return (0);	/* not an L2 -- keep walking */

	if ((ip = l2i->l2i_csz) != NULL)
		*ip = ct->ct_size;
	if ((ip = l2i->l2i_lsz) != NULL)
		*ip = ct->ct_line_size;
	if ((ip = l2i->l2i_assoc) != NULL)
		*ip = ct->ct_assoc;
	l2i->l2i_ret = ct->ct_size;
	return (1);		/* was an L2 -- terminate walk */
}
/*
 * AMD L2/L3 Cache and TLB Associativity Field Definition:
 *
 *	Unlike the associativity for the L1 cache and tlb where the 8 bit
 *	value is the associativity, the associativity for the L2 cache and
 *	tlb is encoded in the following table. The 4 bit L2 value serves as
 *	an index into the amd_afd[] array to determine the associativity.
 *	-1 is undefined. 0 is fully associative.
 */

static int amd_afd[] =
	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
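/*
 * Worked example (illustrative): an encoded L2 associativity field of 0x6
 * indexes amd_afd[6] == 8, i.e. 8-way; 0xf yields amd_afd[15] == 0,
 * meaning fully associative, and reserved encodings map to -1.
 */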
static void
amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
{
	struct cpuid_regs *cp;
	uint_t size, assoc;
	int i;
	int *ip;

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
		uint_t cachesz = size * 1024;
		assoc = amd_afd[i];

		ASSERT(assoc != -1);

		if ((ip = l2i->l2i_csz) != NULL)
			*ip = cachesz;
		if ((ip = l2i->l2i_lsz) != NULL)
			*ip = BITX(cp->cp_ecx, 7, 0);
		if ((ip = l2i->l2i_assoc) != NULL)
			*ip = assoc;
		l2i->l2i_ret = cachesz;
	}
}
int
getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct l2info __l2info, *l2i = &__l2info;

	l2i->l2i_csz = csz;
	l2i->l2i_lsz = lsz;
	l2i->l2i_assoc = assoc;
	l2i->l2i_ret = -1;

	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_AMD:
		amd_l2cacheinfo(cpi, l2i);
		break;
	default:
		break;
	}
	return (l2i->l2i_ret);
}
uint32_t *
cpuid_mwait_alloc(cpu_t *cpu)
{
	uint32_t *ret;
	size_t mwait_size;

	ASSERT(cpuid_checkpass(CPU, 2));

	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
	if (mwait_size == 0)
		return (NULL);

	/*
	 * kmem_alloc() returns cache line size aligned data for mwait_size
	 * allocations.  mwait_size is currently cache line sized.  Neither
	 * of these implementation details are guaranteed to be true in the
	 * future.
	 *
	 * First try allocating mwait_size as kmem_alloc() currently returns
	 * correctly aligned memory.  If kmem_alloc() does not return
	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
	 *
	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
	 * decide to free this memory.
	 */
	ret = kmem_zalloc(mwait_size, KM_SLEEP);
	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
		*ret = MWAIT_RUNNING;
		return (ret);
	} else {
		kmem_free(ret, mwait_size);
		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
		*ret = MWAIT_RUNNING;
		return (ret);
	}
}
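/*
 * Worked example (illustrative): with mwait_size == 64, kmem_zalloc()
 * usually hands back a 64-byte aligned buffer, so
 * P2ROUNDUP((uintptr_t)ret, 64) == (uintptr_t)ret and the first branch is
 * taken; otherwise the double-sized allocation guarantees a 64-byte
 * aligned line somewhere inside, which P2ROUNDUP locates.
 */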
void
cpuid_mwait_free(cpu_t *cpu)
{
	if (cpu->cpu_m.mcpu_cpi == NULL) {
		return;
	}

	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
}
void
patch_tsc_read(int flag)
{
	size_t cnt;

	switch (flag) {
	case X86_NO_TSC:
		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
		break;
	case X86_HAVE_TSCP:
		cnt = &_tscp_end - &_tscp_start;
		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
		break;
	case X86_TSC_MFENCE:
		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_mfence_start, cnt);
		break;
	case X86_TSC_LFENCE:
		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_lfence_start, cnt);
		break;
	default:
		break;
	}
}
int
cpuid_deep_cstates_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));

	cpi = CPU->cpu_m.mcpu_cpi;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
		return (0);

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_xmaxeax < 0x80000007)
			return (0);

		/*
		 * Does the TSC run at a constant rate in all ACPI C-states?
		 */
		regs.cp_eax = 0x80000007;
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);

	default:
		return (0);
	}
}
void
post_startup_cpu_fixups(void)
{
	/*
	 * Some AMD processors support C1E state. Entering this state will
	 * cause the local APIC timer to stop, which we can't deal with at
	 * this time.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
}
/*
 * Setup necessary registers to enable XSAVE feature on this processor.
 * This function needs to be called early enough, so that no xsave/xrstor
 * ops will execute on the processor before the MSRs are properly set up.
 *
 * Current implementation has the following assumption:
 * - cpuid_pass1() is done, so that X86 features are known.
 * - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);
	/*
	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
	 * the OSXSAVE feature.
	 */
	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
}
/*
 * Starting with the Westmere processor the local
 * APIC timer will continue running in all C-states,
 * including the deepest C-states.
 */
int
cpuid_arat_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	cpi = CPU->cpu_m.mcpu_cpi;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * Always-running Local APIC Timer is
		 * indicated by CPUID.6.EAX[2].
		 */
		if (cpi->cpi_maxeax >= 6) {
			regs.cp_eax = 6;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_eax & CPUID_CSTATE_ARAT);
		}
		return (0);
	default:
		return (0);
	}
}
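
/*
 * As with the other capability checks here, the return value is the raw
 * CPUID bit (CPUID_CSTATE_ARAT), not a canonical 0 or 1; callers must
 * treat any nonzero value as "supported".
 */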
/*
 * Check support for the Intel ENERGY_PERF_BIAS feature.
 */
int
cpuid_iepb_supported(struct cpu *cp)
{
	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(cp, 1));

	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
		return (0);
	}

	/*
	 * The Intel ENERGY_PERF_BIAS MSR is indicated by
	 * capability bit CPUID.6.ECX[3].
	 */
	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
		return (0);

	regs.cp_eax = 0x6;
	(void) cpuid_insn(NULL, &regs);
	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
}
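
/*
 * When this reports support, the architectural IA32_ENERGY_PERF_BIAS MSR
 * (0x1B0) accepts a 4-bit policy hint: 0 biases the processor entirely
 * toward performance and 15 entirely toward energy saving, with
 * intermediate values trading one off against the other.
 */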
/*
 * Check support for the TSC deadline timer.
 *
 * The TSC deadline timer provides a superior software programming model
 * over the local APIC timer and eliminates "time drift": instead of
 * specifying a relative time, software specifies an absolute time as the
 * target at which the processor should generate a timer event.
 */
int
cpuid_deadline_tsc_supported(void)
{
	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 1) {
			regs.cp_eax = 1;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
		}
		return (0);
	default:
		return (0);
	}
}
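
/*
 * Support is indicated by CPUID.1.ECX[24].  A minimal sketch of how a
 * deadline-mode consumer uses the feature, assuming the LVT timer entry
 * has already been switched to TSC-deadline mode: arm a timeout by writing
 * an absolute TSC target to the architectural IA32_TSC_DEADLINE MSR
 * (0x6E0), e.g.
 *
 *	wrmsr(MSR_TSC_DEADLINE, tsc_read() + delta_ticks);
 *
 * and disarm it by writing 0.  MSR_TSC_DEADLINE and delta_ticks are
 * hypothetical names here; the MSR index itself is architectural.
 */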
#if defined(__amd64) && !defined(__xpv)
/*
 * Patch in versions of bcopy for high performance Intel Nhm (Nehalem)
 * processors and later.
 */
void
patch_memops(uint_t vendor)
{
	size_t cnt, i;
	caddr_t to, from;

	if ((vendor == X86_VENDOR_Intel) &&
	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
		cnt = &bcopy_patch_end - &bcopy_patch_start;
		to = &bcopy_ck_size;
		from = &bcopy_patch_start;
		for (i = 0; i < cnt; i++) {
			*to++ = *from++;
		}
	}
}
#endif	/* __amd64 && !__xpv */
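
/*
 * patch_memops() above is byte-by-byte self-modification of kernel text:
 * bcopy_patch_start/bcopy_patch_end bracket a replacement code sequence,
 * and bcopy_ck_size is the patch point inside the bcopy assembly.  This
 * is only safe because it relies on being invoked while no other thread
 * can be executing bcopy, i.e. during early, effectively single-threaded
 * startup.
 */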
/*
 * This function finds the number of bits to represent the number of cores per
 * chip and the number of strands per core for the Intel platforms.
 * It reuses the x2APIC cpuid code of cpuid_pass2().
 */
void
cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
{
	struct cpuid_regs regs;
	struct cpuid_regs *cp = &regs;

	if (vendor != X86_VENDOR_Intel) {
		return;
	}

	/* If the cpuid level is 0xB, the extended topology leaf is available. */
	cp->cp_eax = 0;
	if (__cpuid_insn(cp) >= 0xB) {

		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
		(void) __cpuid_insn(cp);

		/*
		 * Check that CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint_t coreid_shift = 0;
			uint_t chipid_shift = 0;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					/*
					 * Thread level processor topology:
					 * number of bits to shift the APIC
					 * ID right to get the coreid.
					 */
					coreid_shift = BITX(cp->cp_eax, 4, 0);
				} else if (level == 2) {
					/*
					 * Core level processor topology:
					 * number of bits to shift the APIC
					 * ID right to get the chipid.
					 */
					chipid_shift = BITX(cp->cp_eax, 4, 0);
				}
			}

			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
				*strand_nbits = coreid_shift;
				*core_nbits = chipid_shift - coreid_shift;
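				/*
				 * Worked example with hypothetical shift
				 * values: coreid_shift == 1 and
				 * chipid_shift == 5 yield strand_nbits == 1
				 * (2 strands per core) and core_nbits == 4
				 * (up to 16 cores per chip), i.e. bit [0]
				 * of the APIC ID selects the strand and
				 * bits [4:1] select the core.
				 */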