/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Portions Copyright 2009 Advanced Micro Devices, Inc.
 */
/*
 * Copyright 2016 Joyent, Inc.
 */
/*
 * Various routines to handle identification
 * and classification of x86 processors.
 */
#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpuvar.h>
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <sys/controlregs.h>
#include <sys/bitmap.h>
#include <sys/auxv_386.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/comm_page.h>
#include <sys/ontrap.h>
/*
 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
 * them accordingly. For most modern processors, feature detection occurs here
 * in pass 1.
 *
 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
 * for the boot CPU and does the basic analysis that the early kernel needs.
 * x86_featureset is set based on the return value of cpuid_pass1() of the boot
 * CPU. In addition, pass 1 includes:
 *
 *	o Determining vendor/model/family/stepping and setting x86_type and
 *	  x86_vendor accordingly.
 *	o Processing the feature flags returned by the cpuid instruction while
 *	  applying any workarounds or tricks for the specific processor.
 *	o Mapping the feature flags into Solaris feature bits (X86_*).
 *	o Processing extended feature flags if supported by the processor,
 *	  again while applying specific processor knowledge.
 *	o Determining the CMT characteristics of the system.
 *
 * Pass 1 is done on non-boot CPUs during their initialization and the results
 * are used only as a meager attempt at ensuring that all processors within the
 * system support the same features.
 *
 * Pass 2 of cpuid feature analysis happens just at the beginning
 * of startup(). It just copies in and corrects the remainder
 * of the cpuid data we depend on: standard cpuid functions that we didn't
 * need for pass1 feature analysis, and extended cpuid functions beyond the
 * simple feature processing done in pass1.
 *
 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
 * particular kernel memory allocation has been made available. It creates a
 * readable brand string based on the data collected in the first two passes.
 *
 * Pass 4 of cpuid analysis is invoked after post_startup() when all
 * the support infrastructure for various hardware features has been
 * initialized. It determines which processor features will be reported
 * to userland via the aux vector.
 *
 * All passes are executed on all CPUs, but only the boot CPU determines what
 * features the kernel will use.
 *
 * Much of the worst junk in this file is for the support of processors
 * that didn't really implement the cpuid instruction properly.
 *
 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 * the pass numbers. Accordingly, changes to the pass code may require changes
 * to the accessor code.
 */
uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;

uint_t pentiumpro_bug4046376;

uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];

static char *x86_feature_names[NUM_X86_FEATURES] = {
boolean_t
is_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	return (BT_TEST((ulong_t *)featureset, feature));
}
void
add_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_SET((ulong_t *)featureset, feature);
}
void
remove_x86_feature(void *featureset, uint_t feature)
{
	ASSERT(feature < NUM_X86_FEATURES);
	BT_CLEAR((ulong_t *)featureset, feature);
}
boolean_t
compare_x86_featureset(void *setA, void *setB)
{
	/*
	 * We assume that the unused bits of the bitmap are always zero.
	 */
	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}
void
print_x86_featureset(void *featureset)
{
	uint_t i;

	for (i = 0; i < NUM_X86_FEATURES; i++) {
		if (is_x86_feature(featureset, i)) {
			cmn_err(CE_CONT, "?x86_feature: %s\n",
			    x86_feature_names[i]);
		}
	}
}
static size_t xsave_state_size = 0;
uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
boolean_t xsave_force_disable = B_FALSE;
extern int disable_smap;
/*
 * This is set to the platform type we are running on.
 */
static int platform_type = -1;

/*
 * Variable to patch if hypervisor platform detection needs to be
 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
 */
int enable_platform_detection = 1;
/*
 * monitor/mwait info.
 *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment. buf_actual and size_actual should be passed
 * to kmem_free(). Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
 */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};
/*
 * xsave/xrestor info.
 *
 * This structure contains HW feature bits and the size of the xsave save area.
 * Note: the kernel will use the maximum size required for all hardware
 * features. It is not optimized for potential memory savings if features at
 * the end of the save area are not enabled.
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;	/* Supported HW features */
	uint32_t	xsav_hw_features_high;	/* Supported HW features */
	size_t		xsav_max_size;	/* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
};
/*
 * These constants determine how many of the elements of the
 * cpuid we cache in the cpuid_info data structure; the
 * remaining elements are accessible via the cpuid instruction.
 */

#define	NMAX_CPI_STD	8		/* eax = 0 .. 7 */
#define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
/*
 * Some terminology needs to be explained:
 *  - Socket: Something that can be plugged into a motherboard.
 *  - Package: Same as socket.
 *  - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
 *    differently: there, chip is the same as processor node (below).
 *  - Processor node: Some AMD processors have more than one
 *    "subprocessor" embedded in a package. These subprocessors (nodes)
 *    are fully-functional processors themselves with cores, caches,
 *    memory controllers, and PCI configuration spaces. They are connected
 *    inside the package with Hypertransport links. On single-node
 *    processors, the processor node is equivalent to chip/socket/package.
 *  - Compute Unit: Some AMD processors pair cores in "compute units" that
 *    share the FPU and the I$ and L2 caches.
 */
struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
					/*		AMD: package/socket # */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 7 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000008: %eax */
	uint8_t cpi_vabits;		/* fn 0x80000008: %eax */
	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */

	id_t cpi_coreid;		/* same coreid => strands share core */
	int cpi_pkgcoreid;		/* core number within single package */
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[6];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3
#define	AMD_ECX_FEATURES	4
#define	STD_EBX_FEATURES	5
	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */

	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */

	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */

	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */

	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
};
static struct cpuid_info cpuid_info0;
/*
 * These bit fields are defined by the Intel Application Note AP-485
 * "Intel Processor Identification and the CPUID Instruction"
 */
#define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
#define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
#define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
#define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
#define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
#define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
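/*
 * Each of the macros above uses BITX(reg, hi, lo), which extracts the
 * inclusive bit field reg[hi:lo]; CPI_FAMILY(cpi), for example, reads
 * bits 11:8 of the leaf 1 %eax value.
 */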
#define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
#define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
#define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
#define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
#define	CPI_FEATURES_7_0_EBX(cpi)	((cpi)->cpi_std[7].cp_ebx)

#define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
#define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
#define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
#define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
#define	CPI_MAXEAX_MAX		0x100		/* sanity control */
#define	CPI_XMAXEAX_MAX		0x80000100
#define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
#define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
/*
 * Function 4 (Deterministic Cache Parameters) macros
 * Defined by Intel Application Note AP-485
 */
#define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
#define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
#define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
#define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
#define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
#define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
#define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)

#define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
#define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
#define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)

#define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)

#define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
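/*
 * Note that the ways/partitions/line-size/sets fields above are reported
 * biased by one, so the cache size in bytes for a given level works out to
 * (CPI_CACHE_WAYS + 1) * (CPI_CACHE_PARTS + 1) *
 * (CPI_CACHE_COH_LN_SZ + 1) * (CPI_CACHE_SETS + 1).
 */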
/*
 * A couple of shorthand macros to identify "later" P6-family chips
 * like the Pentium M and Core. First, the "older" P6-based stuff
 * (loosely defined as "pre-Pentium-4"):
 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
 */
#define	IS_LEGACY_P6(cpi) (			\
	cpi->cpi_family == 6 &&			\
		(cpi->cpi_model == 1 ||		\
		cpi->cpi_model == 3 ||		\
		cpi->cpi_model == 5 ||		\
		cpi->cpi_model == 6 ||		\
		cpi->cpi_model == 7 ||		\
		cpi->cpi_model == 8 ||		\
		cpi->cpi_model == 0xA ||	\
		cpi->cpi_model == 0xB)		\
)

/* A "new F6" is everything with family 6 that's not the above */
#define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))

/* Extended family/model support */
#define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
	cpi->cpi_family >= 0xf)
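/*
 * For example, family 6 model 8 (the "Coppermine" Pentium III) falls in
 * the legacy-P6 list above, while family 6 model 0xE (first-generation
 * Intel Core) does not and therefore counts as a "new F6".
 */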
/*
 * Info for monitor/mwait idle loop.
 *
 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
 * 2006.
 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
 * Documentation Updates" #33633, Rev 2.05, December 2006.
 */
#define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
#define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
#define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
#define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
#define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
#define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
#define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)

/*
 * Number of sub-cstates for a given c-state.
 */
#define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
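/*
 * The c_state argument is the low bit position of the 4-bit field in leaf 5
 * %edx for that C-state, i.e. 4 * n for C-state n. For example,
 * MWAIT_NUM_SUBC_STATES(cpi, 4) yields %edx[7:4], the number of C1
 * sub-states.
 */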
/*
 * XSAVE leaf 0xD enumeration
 */
#define	CPUID_LEAFD_2_YMM_OFFSET	576
#define	CPUID_LEAFD_2_YMM_SIZE		256
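/*
 * These values follow from the layout of the XSAVE area: the ymm state
 * begins after the 512-byte legacy FP/SSE region plus the 64-byte XSAVE
 * header (576), and covers the upper 128 bits of the 16 ymm registers
 * (16 * 16 = 256 bytes).
 */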
/*
 * Functions we consume from cpuid_subr.c; don't publish these in a header
 * file to try and keep people using the expected cpuid_* interfaces.
 */
extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
/*
 * Apply various platform-dependent restrictions where the
 * underlying platform restrictions mean the CPU can be marked
 * as less capable than its cpuid instruction would imply.
 */
#define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
/*
 * Some undocumented ways of patching the results of the cpuid
 * instruction to permit running Solaris 10 on future cpus that
 * we don't currently support. Could be set to non-zero values
 * via settings in eeprom.
 */

uint32_t cpuid_feature_ecx_include;
uint32_t cpuid_feature_ecx_exclude;
uint32_t cpuid_feature_edx_include;
uint32_t cpuid_feature_edx_exclude;
/*
 * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
 */
void
cpuid_alloc_space(cpu_t *cpu)
{
	/*
	 * By convention, cpu0 is the boot cpu, which is set up
	 * before memory allocation is available. All other cpus get
	 * their cpuid_info struct allocated here.
	 */
	ASSERT(cpu->cpu_id != 0);
	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
	cpu->cpu_m.mcpu_cpi =
	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
}
void
cpuid_free_space(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	int i;

	ASSERT(cpi != NULL);
	ASSERT(cpi != &cpuid_info0);

	/*
	 * Free up any function 4 related dynamic storage
	 */
	for (i = 1; i < cpi->cpi_std_4_size; i++)
		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
	if (cpi->cpi_std_4_size > 0)
		kmem_free(cpi->cpi_std_4,
		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));

	kmem_free(cpi, sizeof (*cpi));
	cpu->cpu_m.mcpu_cpi = NULL;
}
/*
 * Determine the type of the underlying platform. This is used to customize
 * initialization of various subsystems (e.g. TSC). determine_platform() must
 * only ever be called once to prevent two processors from seeing different
 * values of platform_type. Must be called before cpuid_pass1(), the earliest
 * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
 */
void
determine_platform(void)
{
	struct cpuid_regs cp;
	uint32_t base;
	uint32_t regs[4];
	char *hvstr = (char *)regs;

	ASSERT(platform_type == -1);

	platform_type = HW_NATIVE;

	if (!enable_platform_detection)
		return;

	/*
	 * If Hypervisor CPUID bit is set, try to determine hypervisor
	 * vendor signature, and set platform type accordingly.
	 *
	 * References:
	 * http://lkml.org/lkml/2008/10/1/246
	 * http://kb.vmware.com/kb/1009458
	 */
	cp.cp_eax = 0x1;
	(void) __cpuid_insn(&cp);
	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
		cp.cp_eax = 0x40000000;
		(void) __cpuid_insn(&cp);
		regs[0] = cp.cp_ebx;
		regs[1] = cp.cp_ecx;
		regs[2] = cp.cp_edx;
		regs[3] = 0;
		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
			platform_type = HW_XEN_HVM;
			return;
		}
		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
			platform_type = HW_VMWARE;
			return;
		}
		if (strcmp(hvstr, HVSIG_KVM) == 0) {
			platform_type = HW_KVM;
			return;
		}
		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
			platform_type = HW_MICROSOFT;
	} else {
		/*
		 * Check older VMware hardware versions. VMware hypervisor is
		 * detected by performing an IN operation to VMware hypervisor
		 * port and checking that value returned in %ebx is VMware
		 * hypervisor magic value.
		 *
		 * References: http://kb.vmware.com/kb/1009458
		 */
		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
		if (regs[1] == VMWARE_HVMAGIC) {
			platform_type = HW_VMWARE;
			return;
		}
	}

	/*
	 * Check Xen hypervisor. In a fully virtualized domain,
	 * Xen's pseudo-cpuid function returns a string representing the
	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
	 * supported cpuid function. We need at least a (base + 2) leaf value
	 * to do what we want to do. Try different base values, since the
	 * hypervisor might use a different one depending on whether Hyper-V
	 * emulation is switched on by default or not.
	 */
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cp.cp_eax = base;
		(void) __cpuid_insn(&cp);
		regs[0] = cp.cp_ebx;
		regs[1] = cp.cp_ecx;
		regs[2] = cp.cp_edx;
		regs[3] = 0;
		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
		    cp.cp_eax >= (base + 2)) {
			platform_type &= ~HW_NATIVE;
			platform_type |= HW_XEN_HVM;
			return;
		}
	}
}

int
get_hwenv(void)
{
	ASSERT(platform_type != -1);
	return (platform_type);
}
static void
cpuid_intel_getids(cpu_t *cpu, void *feature)
{
	uint_t i;
	uint_t chipid_shift = 0;
	uint_t coreid_shift = 0;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
		chipid_shift++;

	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
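	/*
	 * Illustrative example: with 8 logical CPUs per chip, the loop
	 * above leaves chipid_shift == 3, so an APIC ID of 0x1a yields
	 * chipid 3 and clogid 2.
	 */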
	if (is_x86_feature(feature, X86FSET_CMP)) {
		/*
		 * Multi-core (and possibly multi-threaded)
		 * processors.
		 */
		uint_t ncpu_per_core;
		if (cpi->cpi_ncore_per_chip == 1)
			ncpu_per_core = cpi->cpi_ncpu_per_chip;
		else if (cpi->cpi_ncore_per_chip > 1)
			ncpu_per_core = cpi->cpi_ncpu_per_chip /
			    cpi->cpi_ncore_per_chip;
		/*
		 * 8bit APIC IDs on dual core Pentiums
		 * look like this:
		 *
		 * +-----------------------+------+------+
		 * | Physical Package ID   |  MC  |  HT  |
		 * +-----------------------+------+------+
		 * <------- chipid -------->
		 * <------- coreid --------------->
		 *
		 * Where the number of bits necessary to
		 * represent MC and HT fields together equals
		 * to the minimum number of bits necessary to
		 * store the value of cpi->cpi_ncpu_per_chip.
		 * Of those bits, the MC part uses the number
		 * of bits necessary to store the value of
		 * cpi->cpi_ncore_per_chip.
		 */
		for (i = 1; i < ncpu_per_core; i <<= 1)
			coreid_shift++;
		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
	} else if (is_x86_feature(feature, X86FSET_HTT)) {
		/*
		 * Single-core multi-threaded processors.
		 */
		cpi->cpi_coreid = cpi->cpi_chipid;
		cpi->cpi_pkgcoreid = 0;
	}
	cpi->cpi_procnodeid = cpi->cpi_chipid;
	cpi->cpi_compunitid = cpi->cpi_coreid;
}
static void
cpuid_amd_getids(cpu_t *cpu)
{
	int i, first_half, coreidsz;
	uint32_t nb_caps_reg;
	uint_t node2_1;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct cpuid_regs *cp;

	/*
	 * AMD CMP chips currently have a single thread per core.
	 *
	 * Since no two cpus share a core we must assign a distinct coreid
	 * per cpu, and we do this by using the cpu_id. This scheme does not,
	 * however, guarantee that sibling cores of a chip will have sequential
	 * coreids starting at a multiple of the number of cores per chip -
	 * that is usually the case, but if the ACPI MADT table is presented
	 * in a different order then we need to perform a few more gymnastics
	 * for the pkgcoreid.
	 *
	 * All processors in the system have the same number of enabled
	 * cores. Cores within a processor are always numbered sequentially
	 * from 0 regardless of how many or which are disabled, and there
	 * is no way for the operating system to discover the real core id
	 * when some are disabled.
	 *
	 * In family 0x15, the cores come in pairs called compute units. They
	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
	 * simplified by the new topology extensions CPUID leaf, indicated by
	 * the X86 feature X86FSET_TOPOEXT.
	 */

	cpi->cpi_coreid = cpu->cpu_id;
	cpi->cpi_compunitid = cpu->cpu_id;

	if (cpi->cpi_xmaxeax >= 0x80000008) {

		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);

		/*
		 * In AMD parlance chip is really a node while Solaris
		 * sees chip as equivalent to socket/package.
		 */
		cpi->cpi_ncore_per_chip =
		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
		if (coreidsz == 0) {
			/* Use legacy method */
			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
				coreidsz++;
			if (coreidsz == 0)
				coreidsz = 1;
		}
	} else {
		/* Assume single-core part */
		cpi->cpi_ncore_per_chip = 1;
		coreidsz = 1;
	}

	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;

	/* Get node ID, compute unit ID */
	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
	    cpi->cpi_xmaxeax >= 0x8000001e) {
		cp = &cpi->cpi_extd[0x1e];
		cp->cp_eax = 0x8000001e;
		(void) __cpuid_insn(cp);

		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
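		/*
		 * Illustrative example: with 8 cores per node, 2 cores per
		 * compute unit (so ncore / cores_per_compunit == 4) and
		 * 2 nodes per package, a CPU on node 3 (second package)
		 * reporting local compute unit 1 ends up with
		 * compunitid 1 + 4 * (3 / 2) = 5.
		 */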
	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
	} else if (cpi->cpi_family == 0x10) {
		/*
		 * See if we are a multi-node processor.
		 * All processors in the system have the same number of nodes
		 */
		nb_caps_reg = pci_getl_func(0, 24, 3, 0xe8);
		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
			/* Single-node */
			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
			    coreidsz);
		} else {

			/*
			 * Multi-node revision D (2 nodes per package
			 * are supported)
			 */
			cpi->cpi_procnodes_per_pkg = 2;

			first_half = (cpi->cpi_pkgcoreid <=
			    (cpi->cpi_ncore_per_chip/2 - 1));

			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
				/* We are BSP */
				cpi->cpi_procnodeid = (first_half ? 0 : 1);
			} else {
				/* We are AP */
				/* NodeId[2:1] bits to use for reading F3xe8 */
				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;

				nb_caps_reg =
				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);

				/*
				 * Check IntNodeNum bit (31:30, but bit 31 is
				 * always 0 on dual-node processors)
				 */
				if (BITX(nb_caps_reg, 30, 30) == 0)
					cpi->cpi_procnodeid = node2_1 +
					    !first_half;
				else
					cpi->cpi_procnodeid = node2_1 +
					    first_half;
			}
		}
	} else {
		cpi->cpi_procnodeid = 0;
	}

	cpi->cpi_chipid =
	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
}
/*
 * Setup XFeature_Enabled_Mask register. Required by xsave feature.
 */
void
setup_xfem(void)
{
	uint64_t flags = XFEATURE_LEGACY_FP;

	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	if (is_x86_feature(x86_featureset, X86FSET_SSE))
		flags |= XFEATURE_SSE;

	if (is_x86_feature(x86_featureset, X86FSET_AVX))
		flags |= XFEATURE_AVX;

	set_xcr(XFEATURE_ENABLED_MASK, flags);

	xsave_bv_all = flags;
}
void
cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
{
	uint32_t mask_ecx, mask_edx;
	struct cpuid_info *cpi;
	struct cpuid_regs *cp;
	int xcpuid;
	extern int idle_cpu_prefer_mwait;

	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cpu->cpu_id == 0) {
		if (cpu->cpu_m.mcpu_cpi == NULL)
			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
	}

	add_x86_feature(featureset, X86FSET_CPUID);

	cpi = cpu->cpu_m.mcpu_cpi;
	cp = &cpi->cpi_std[0];
	cp->cp_eax = 0;
	cpi->cpi_maxeax = __cpuid_insn(cp);
	{
		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
		*iptr++ = cp->cp_ebx;
		*iptr++ = cp->cp_edx;
		*iptr++ = cp->cp_ecx;
		*(char *)&cpi->cpi_vendorstr[12] = '\0';
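		/*
		 * Note the %ebx, %edx, %ecx ordering: that is how the
		 * 12-byte vendor string (e.g. "GenuineIntel" or
		 * "AuthenticAMD") is laid out by the cpuid instruction.
		 */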
	}

	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
	x86_vendor = cpi->cpi_vendor;		/* for compatibility */

	/*
	 * Limit the range in case of weird hardware
	 */
	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
	if (cpi->cpi_maxeax < 1)
		goto pass1_done;

	cp = &cpi->cpi_std[1];
	cp->cp_eax = 1;
	(void) __cpuid_insn(cp);

	/*
	 * Extract identifying constants for easy access.
	 */
	cpi->cpi_model = CPI_MODEL(cpi);
	cpi->cpi_family = CPI_FAMILY(cpi);

	if (cpi->cpi_family == 0xf)
		cpi->cpi_family += CPI_FAMILY_XTD(cpi);

	/*
	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
	 * Intel, and presumably everyone else, uses model == 0xf, as
	 * one would expect (max value means possible overflow). Sigh.
	 */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (IS_EXTENDED_MODEL_INTEL(cpi))
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	case X86_VENDOR_AMD:
		if (CPI_FAMILY(cpi) == 0xf)
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	default:
		if (cpi->cpi_model == 0xf)
			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
		break;
	}
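	/*
	 * Illustrative example: an Intel part reporting family 0x6 with
	 * base model 0xA and extended model 0x2 ends up with
	 * cpi_model = 0xA + (0x2 << 4) = 0x2A.
	 */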
	cpi->cpi_step = CPI_STEP(cpi);
	cpi->cpi_brandid = CPI_BRANDID(cpi);

	/*
	 * *default* assumptions:
	 * - believe %edx feature word
	 * - ignore %ecx feature word
	 * - 32-bit virtual and physical addressing
	 */
	mask_edx = 0xffffffff;
	mask_ecx = 0;

	cpi->cpi_pabits = cpi->cpi_vabits = 32;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_family == 5)
			x86_type = X86_TYPE_P5;
		else if (IS_LEGACY_P6(cpi)) {
			x86_type = X86_TYPE_P6;
			pentiumpro_bug4046376 = 1;
			/*
			 * Clear the SEP bit when it was set erroneously
			 */
			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
			x86_type = X86_TYPE_P4;
			/*
			 * We don't currently depend on any of the %ecx
			 * features until Prescott, so we'll only check
			 * this from P4 onwards. We might want to revisit
			 * that idea later.
			 */
			mask_ecx = 0xffffffff;
		} else if (cpi->cpi_family > 0xf)
			mask_ecx = 0xffffffff;
		/*
		 * We don't support MONITOR/MWAIT if leaf 5 is not available
		 * to obtain the monitor linesize.
		 */
		if (cpi->cpi_maxeax < 5)
			mask_ecx &= ~CPUID_INTC_ECX_MON;
		break;
	case X86_VENDOR_IntelClone:
	default:
		break;
	case X86_VENDOR_AMD:
#if defined(OPTERON_ERRATUM_108)
		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
			cpi->cpi_model = 0xc;
		} else
#endif
		if (cpi->cpi_family == 5) {
			/*
			 * These CPUs have an incomplete implementation
			 * of MCA/MCE which we mask away.
			 */
			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);

			/*
			 * Model 0 uses the wrong (APIC) bit
			 * to indicate PGE. Fix it here.
			 */
			if (cpi->cpi_model == 0) {
				if (cp->cp_edx & 0x200) {
					cp->cp_edx &= ~0x200;
					cp->cp_edx |= CPUID_INTC_EDX_PGE;
				}
			}

			/*
			 * Early models had problems w/ MMX; disable.
			 */
			if (cpi->cpi_model < 6)
				mask_edx &= ~CPUID_INTC_EDX_MMX;
		}

		/*
		 * For newer families, SSE3 and CX16, at least, are valid;
		 * enable all
		 */
		if (cpi->cpi_family >= 0xf)
			mask_ecx = 0xffffffff;
		/*
		 * We don't support MONITOR/MWAIT if leaf 5 is not available
		 * to obtain the monitor linesize.
		 */
		if (cpi->cpi_maxeax < 5)
			mask_ecx &= ~CPUID_INTC_ECX_MON;

		/*
		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
		 * processors. AMD does not intend MWAIT to be used in the cpu
		 * idle loop on current and future processors. 10h and future
		 * AMD processors use more power in MWAIT than HLT.
		 * Pre-family-10h Opterons do not have the MWAIT instruction.
		 */
		idle_cpu_prefer_mwait = 0;
		break;
	case X86_VENDOR_TM:
		/*
		 * workaround the NT workaround in CMS 4.1
		 */
		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
			cp->cp_edx |= CPUID_INTC_EDX_CX8;
		break;
	case X86_VENDOR_Centaur:
		/*
		 * workaround the NT workarounds again
		 */
		if (cpi->cpi_family == 6)
			cp->cp_edx |= CPUID_INTC_EDX_CX8;
		break;
	case X86_VENDOR_Cyrix:
		/*
		 * We rely heavily on the probing in locore
		 * to actually figure out what parts, if any,
		 * of the Cyrix cpuid instruction to believe.
		 */
		switch (x86_type) {
		case X86_TYPE_CYRIX_486:
			mask_edx = 0;
			break;
		case X86_TYPE_CYRIX_6x86:
			mask_edx = 0;
			break;
		case X86_TYPE_CYRIX_6x86L:
			mask_edx =
			    CPUID_INTC_EDX_DE |
			    CPUID_INTC_EDX_CX8;
			break;
		case X86_TYPE_CYRIX_6x86MX:
			mask_edx =
			    CPUID_INTC_EDX_DE |
			    CPUID_INTC_EDX_MSR |
			    CPUID_INTC_EDX_CX8 |
			    CPUID_INTC_EDX_PGE |
			    CPUID_INTC_EDX_CMOV |
			    CPUID_INTC_EDX_MMX;
			break;
		case X86_TYPE_CYRIX_GXm:
			mask_edx =
			    CPUID_INTC_EDX_MSR |
			    CPUID_INTC_EDX_CX8 |
			    CPUID_INTC_EDX_CMOV |
			    CPUID_INTC_EDX_MMX;
			break;
		case X86_TYPE_CYRIX_MediaGX:
			break;
		case X86_TYPE_CYRIX_MII:
		case X86_TYPE_VIA_CYRIX_III:
			mask_edx =
			    CPUID_INTC_EDX_DE |
			    CPUID_INTC_EDX_TSC |
			    CPUID_INTC_EDX_MSR |
			    CPUID_INTC_EDX_CX8 |
			    CPUID_INTC_EDX_PGE |
			    CPUID_INTC_EDX_CMOV |
			    CPUID_INTC_EDX_MMX;
			break;
		default:
			break;
		}
		break;
	}
	if (xsave_force_disable) {
		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
		mask_ecx &= ~CPUID_INTC_ECX_AVX;
		mask_ecx &= ~CPUID_INTC_ECX_F16C;
		mask_ecx &= ~CPUID_INTC_ECX_FMA;
	}

	/*
	 * Now we've figured out the masks that determine
	 * which bits we choose to believe, apply the masks
	 * to the feature words, then map the kernel's view
	 * of these feature words into its feature word.
	 */
	cp->cp_edx &= mask_edx;
	cp->cp_ecx &= mask_ecx;

	/*
	 * apply any platform restrictions (we don't call this
	 * immediately after __cpuid_insn here, because we need the
	 * workarounds applied above first)
	 */
	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
	/*
	 * In addition to ecx and edx, Intel is storing a bunch of instruction
	 * set extensions in leaf 7's ebx.
	 */
	if (cpi->cpi_vendor == X86_VENDOR_Intel && cpi->cpi_maxeax >= 7) {
		struct cpuid_regs *ecp;
		ecp = &cpi->cpi_std[7];
		ecp->cp_eax = 7;
		ecp->cp_ecx = 0;
		(void) __cpuid_insn(ecp);
		/*
		 * If XSAVE has been disabled, just ignore all of the AVX
		 * dependent flags here.
		 */
		if (xsave_force_disable) {
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
			ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
		}

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP)
			add_x86_feature(featureset, X86FSET_SMEP);

		/*
		 * We check disable_smap here in addition to in startup_smap()
		 * to ensure CPUs that aren't the boot CPU don't accidentally
		 * include it in the feature set and thus generate a mismatched
		 * x86 feature set across CPUs. Note that at this time we only
		 * enable SMAP for the 64-bit kernel.
		 */
#if defined(__amd64)
		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMAP &&
		    disable_smap == 0)
			add_x86_feature(featureset, X86FSET_SMAP);
#endif
		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_RDSEED)
			add_x86_feature(featureset, X86FSET_RDSEED);

		if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_ADX)
			add_x86_feature(featureset, X86FSET_ADX);
	}
	/*
	 * fold in overrides from the "eeprom" mechanism
	 */
	cp->cp_edx |= cpuid_feature_edx_include;
	cp->cp_edx &= ~cpuid_feature_edx_exclude;

	cp->cp_ecx |= cpuid_feature_ecx_include;
	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
		add_x86_feature(featureset, X86FSET_LARGEPAGE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
		add_x86_feature(featureset, X86FSET_TSC);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
		add_x86_feature(featureset, X86FSET_MSR);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
		add_x86_feature(featureset, X86FSET_MTRR);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
		add_x86_feature(featureset, X86FSET_PGE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
		add_x86_feature(featureset, X86FSET_CMOV);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
		add_x86_feature(featureset, X86FSET_MMX);
	}
	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
		add_x86_feature(featureset, X86FSET_MCA);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
		add_x86_feature(featureset, X86FSET_PAE);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
		add_x86_feature(featureset, X86FSET_CX8);
	}
	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
		add_x86_feature(featureset, X86FSET_CX16);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
		add_x86_feature(featureset, X86FSET_PAT);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
		add_x86_feature(featureset, X86FSET_SEP);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
		/*
		 * In our implementation, fxsave/fxrstor
		 * are prerequisites before we'll even
		 * try and do SSE things.
		 */
		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
			add_x86_feature(featureset, X86FSET_SSE);
		}
		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
			add_x86_feature(featureset, X86FSET_SSE2);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
			add_x86_feature(featureset, X86FSET_SSE3);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
			add_x86_feature(featureset, X86FSET_SSSE3);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
			add_x86_feature(featureset, X86FSET_SSE4_1);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
			add_x86_feature(featureset, X86FSET_SSE4_2);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
			add_x86_feature(featureset, X86FSET_AES);
		}
		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
		}

		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
			add_x86_feature(featureset, X86FSET_XSAVE);

			/* We only test AVX when there is XSAVE */
			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
				add_x86_feature(featureset,
				    X86FSET_AVX);

				/*
				 * Intel says we can't check these without also
				 * checking AVX.
				 */
				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
					add_x86_feature(featureset,
					    X86FSET_F16C);

				if (cp->cp_ecx & CPUID_INTC_ECX_FMA)
					add_x86_feature(featureset,
					    X86FSET_FMA);

				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_BMI1)
					add_x86_feature(featureset,
					    X86FSET_BMI1);

				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_BMI2)
					add_x86_feature(featureset,
					    X86FSET_BMI2);

				if (cpi->cpi_std[7].cp_ebx &
				    CPUID_INTC_EBX_7_0_AVX2)
					add_x86_feature(featureset,
					    X86FSET_AVX2);
			}
		}
	}
	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
		add_x86_feature(featureset, X86FSET_X2APIC);
	}
	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
		add_x86_feature(featureset, X86FSET_DE);
	}
	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
		/*
		 * We require the CLFLUSH instruction for erratum workaround
		 * to use MONITOR/MWAIT.
		 */
		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
			add_x86_feature(featureset, X86FSET_MWAIT);
		} else {
			extern int idle_cpu_assert_cflush_monitor;

			/*
			 * All processors we are aware of which have
			 * MONITOR/MWAIT also have CLFLUSH.
			 */
			if (idle_cpu_assert_cflush_monitor) {
				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
			}
		}
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
		add_x86_feature(featureset, X86FSET_VMX);
	}

	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
		add_x86_feature(featureset, X86FSET_RDRAND);

	/*
	 * Only needed the first time; the rest of the cpus would follow
	 * suit. We only capture this for the boot cpu.
	 */
	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
		add_x86_feature(featureset, X86FSET_CLFSH);
		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
	}
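	/*
	 * Leaf 1 %ebx[15:8] reports the CLFLUSH line size in units of
	 * 8 bytes, hence the multiplication above to get bytes.
	 */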
	if (is_x86_feature(featureset, X86FSET_PAE))
		cpi->cpi_pabits = 36;

	/*
	 * Hyperthreading configuration is slightly tricky on Intel
	 * and pure clones, and even trickier on AMD.
	 *
	 * (AMD chose to set the HTT bit on their CMP processors,
	 * even though they're not actually hyperthreaded. Thus it
	 * takes a bit more work to figure out what's really going
	 * on ... see the handling of the CMP_LGCY bit below)
	 */
	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
		if (cpi->cpi_ncpu_per_chip > 1)
			add_x86_feature(featureset, X86FSET_HTT);
	} else {
		cpi->cpi_ncpu_per_chip = 1;
	}
	/*
	 * Work on the "extended" feature information, doing
	 * some basic initialization for cpuid_pass2()
	 */
	xcpuid = 0;
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * On KVM we know we will have proper support for extended
		 * cpuid.
		 */
		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf ||
		    (get_hwenv() == HW_KVM && cpi->cpi_family == 6 &&
		    (cpi->cpi_model == 6 || cpi->cpi_model == 2)))
			xcpuid++;
		break;
	case X86_VENDOR_AMD:
		if (cpi->cpi_family > 5 ||
		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
			xcpuid++;
		break;
	case X86_VENDOR_Cyrix:
		/*
		 * Only these Cyrix CPUs are -known- to support
		 * extended cpuid operations.
		 */
		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
		    x86_type == X86_TYPE_CYRIX_GXm)
			xcpuid++;
		break;
	case X86_VENDOR_Centaur:
	default:
		xcpuid++;
		break;
	}

	if (xcpuid) {
		cp = &cpi->cpi_extd[0];
		cp->cp_eax = 0x80000000;
		cpi->cpi_xmaxeax = __cpuid_insn(cp);
	} else {
		cpi->cpi_xmaxeax = 0;
	}
	if (cpi->cpi_xmaxeax & 0x80000000) {

		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000001)
				break;
			cp = &cpi->cpi_extd[1];
			cp->cp_eax = 0x80000001;
			(void) __cpuid_insn(cp);

			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
			    cpi->cpi_family == 5 &&
			    cpi->cpi_model == 6 &&
			    cpi->cpi_step == 6) {
				/*
				 * K6 model 6 uses bit 10 to indicate SYSC
				 * Later models use bit 11. Fix it here.
				 */
				if (cp->cp_edx & 0x400) {
					cp->cp_edx &= ~0x400;
					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
				}
			}

			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);

			/*
			 * Compute the additions to the kernel's feature word.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
				add_x86_feature(featureset, X86FSET_NX);
			}

			/*
			 * Regardless whether or not we boot 64-bit,
			 * we should have a way to identify whether
			 * the CPU is capable of running 64-bit.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
				add_x86_feature(featureset, X86FSET_64);
			}

#if defined(__amd64)
			/* 1 GB large page - enable only for 64 bit kernel */
			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
				add_x86_feature(featureset, X86FSET_1GPG);
			}
#endif

			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
				add_x86_feature(featureset, X86FSET_SSE4A);
			}

			/*
			 * If both the HTT and CMP_LGCY bits are set,
			 * then we're not actually HyperThreaded. Read
			 * "AMD CPUID Specification" for more details.
			 */
			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
			    is_x86_feature(featureset, X86FSET_HTT) &&
			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
				remove_x86_feature(featureset, X86FSET_HTT);
				add_x86_feature(featureset, X86FSET_CMP);
			}
#if defined(__amd64)
			/*
			 * It's really tricky to support syscall/sysret in
			 * the i386 kernel; we rely on sysenter/sysexit
			 * instead. In the amd64 kernel, things are -way-
			 * better.
			 */
			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
				add_x86_feature(featureset, X86FSET_ASYSC);
			}

			/*
			 * While we're thinking about system calls, note
			 * that AMD processors don't support sysenter
			 * in long mode at all, so don't try to program them.
			 */
			if (x86_vendor == X86_VENDOR_AMD) {
				remove_x86_feature(featureset, X86FSET_SEP);
			}
#endif
			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
				add_x86_feature(featureset, X86FSET_TSCP);
			}

			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
				add_x86_feature(featureset, X86FSET_SVM);
			}

			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
				add_x86_feature(featureset, X86FSET_TOPOEXT);
			}
			break;
		default:
			break;
		}

		/*
		 * Get CPUID data about processor cores and hyperthreads.
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax >= 4) {
				cp = &cpi->cpi_std[4];
				cp->cp_eax = 4;
				cp->cp_ecx = 0;
				(void) __cpuid_insn(cp);
				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
			}
			/*FALLTHROUGH*/
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000008)
				break;
			cp = &cpi->cpi_extd[8];
			cp->cp_eax = 0x80000008;
			(void) __cpuid_insn(cp);
			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);

			/*
			 * Virtual and physical address limits from
			 * cpuid override previously guessed values.
			 */
			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
			break;
		default:
			break;
		}
		/*
		 * Derive the number of cores per chip
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax < 4) {
				cpi->cpi_ncore_per_chip = 1;
				break;
			} else {
				cpi->cpi_ncore_per_chip =
				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
			}
			break;
		case X86_VENDOR_AMD:
			if (cpi->cpi_xmaxeax < 0x80000008) {
				cpi->cpi_ncore_per_chip = 1;
				break;
			} else {
				/*
				 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
				 * 1 less than the number of physical cores on
				 * the chip. In family 0x10 this value can
				 * be affected by "downcoring" - it reflects
				 * 1 less than the number of cores actually
				 * enabled on this node.
				 */
				cpi->cpi_ncore_per_chip =
				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
			}
			break;
		default:
			cpi->cpi_ncore_per_chip = 1;
			break;
		}

		/*
		 * Get CPUID data about TSC Invariance in Deep C-State.
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (cpi->cpi_maxeax >= 7) {
				cp = &cpi->cpi_extd[7];
				cp->cp_eax = 0x80000007;
				cp->cp_ecx = 0;
				(void) __cpuid_insn(cp);
			}
			break;
		default:
			break;
		}
	} else {
		cpi->cpi_ncore_per_chip = 1;
	}
	/*
	 * If more than one core, then this processor is CMP.
	 */
	if (cpi->cpi_ncore_per_chip > 1) {
		add_x86_feature(featureset, X86FSET_CMP);
	}

	/*
	 * If the number of cores is the same as the number
	 * of CPUs, then we cannot have HyperThreading.
	 */
	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
		remove_x86_feature(featureset, X86FSET_HTT);
	}

	cpi->cpi_apicid = CPI_APIC_ID(cpi);
	cpi->cpi_procnodes_per_pkg = 1;
	cpi->cpi_cores_per_compunit = 1;
	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
		/*
		 * Single-core single-threaded processors.
		 */
		cpi->cpi_chipid = -1;
		cpi->cpi_clogid = 0;
		cpi->cpi_coreid = cpu->cpu_id;
		cpi->cpi_pkgcoreid = 0;
		if (cpi->cpi_vendor == X86_VENDOR_AMD)
			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
		else
			cpi->cpi_procnodeid = cpi->cpi_chipid;
	} else if (cpi->cpi_ncpu_per_chip > 1) {
		if (cpi->cpi_vendor == X86_VENDOR_Intel)
			cpuid_intel_getids(cpu, featureset);
		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
			cpuid_amd_getids(cpu);
		else {
			/*
			 * All other processors are currently
			 * assumed to have single cores.
			 */
			cpi->cpi_coreid = cpi->cpi_chipid;
			cpi->cpi_pkgcoreid = 0;
			cpi->cpi_procnodeid = cpi->cpi_chipid;
			cpi->cpi_compunitid = cpi->cpi_chipid;
		}
	}
	/*
	 * Synthesize chip "revision" and socket type
	 */
	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);

pass1_done:
	cpi->cpi_pass = 1;
}
/*
 * Make copies of the cpuid table entries we depend on, in
 * part for ease of parsing now, in part so that we have only
 * one place to correct any of it, in part for ease of
 * later export to userland, and in part so we can look at
 * this stuff in a crash dump.
 */

void
cpuid_pass2(cpu_t *cpu)
{
	uint_t n, nmax;
	int i;
	struct cpuid_regs *cp;
	uint8_t *dp;
	uint32_t *iptr;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 1);

	if (cpi->cpi_maxeax < 1)
		goto pass2_done;
= cpi
->cpi_maxeax
+ 1) > NMAX_CPI_STD
)
1674 nmax
= NMAX_CPI_STD
;
1676 * (We already handled n == 0 and n == 1 in pass 1)
1678 for (n
= 2, cp
= &cpi
->cpi_std
[2]; n
< nmax
; n
++, cp
++) {
1682 * CPUID function 4 expects %ecx to be initialized
1683 * with an index which indicates which cache to return
1684 * information about. The OS is expected to call function 4
1685 * with %ecx set to 0, 1, 2, ... until it returns with
1686 * EAX[4:0] set to 0, which indicates there are no more
1689 * Here, populate cpi_std[4] with the information returned by
1690 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1691 * when dynamic memory allocation becomes available.
1693 * Note: we need to explicitly initialize %ecx here, since
1694 * function 4 may have been previously invoked.
1696 * The same is all true for CPUID function 7.
1698 if (n
== 4 || n
== 7)
1701 (void) __cpuid_insn(cp
);
1702 platform_cpuid_mangle(cpi
->cpi_vendor
, n
, cp
);
1706 * "the lower 8 bits of the %eax register
1707 * contain a value that identifies the number
1708 * of times the cpuid [instruction] has to be
1709 * executed to obtain a complete image of the
1710 * processor's caching systems."
1712 * How *do* they make this stuff up?
1714 cpi
->cpi_ncache
= sizeof (*cp
) *
1715 BITX(cp
->cp_eax
, 7, 0);
1716 if (cpi
->cpi_ncache
== 0)
1718 cpi
->cpi_ncache
--; /* skip count byte */
1721 * Well, for now, rather than attempt to implement
1722 * this slightly dubious algorithm, we just look
1723 * at the first 15 ..
1725 if (cpi
->cpi_ncache
> (sizeof (*cp
) - 1))
1726 cpi
->cpi_ncache
= sizeof (*cp
) - 1;
1728 dp
= cpi
->cpi_cacheinfo
;
1729 if (BITX(cp
->cp_eax
, 31, 31) == 0) {
1730 uint8_t *p
= (void *)&cp
->cp_eax
;
1731 for (i
= 1; i
< 4; i
++)
1735 if (BITX(cp
->cp_ebx
, 31, 31) == 0) {
1736 uint8_t *p
= (void *)&cp
->cp_ebx
;
1737 for (i
= 0; i
< 4; i
++)
1741 if (BITX(cp
->cp_ecx
, 31, 31) == 0) {
1742 uint8_t *p
= (void *)&cp
->cp_ecx
;
1743 for (i
= 0; i
< 4; i
++)
1747 if (BITX(cp
->cp_edx
, 31, 31) == 0) {
1748 uint8_t *p
= (void *)&cp
->cp_edx
;
1749 for (i
= 0; i
< 4; i
++)
		case 3:	/* Processor serial number, if PSN supported */
			break;

		case 4:	/* Deterministic cache parameters */
			break;

		case 5:	/* Monitor/Mwait parameters */
		{
			size_t mwait_size;

			/*
			 * check cpi_mwait.support which was set in cpuid_pass1
			 */
			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
				break;

			/*
			 * Protect ourself from insane mwait line size.
			 * Workaround for incomplete hardware emulator(s).
			 */
			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
			if (mwait_size < sizeof (uint32_t) ||
			    !ISP2(mwait_size)) {
				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
				    "size %ld", cpu->cpu_id, (long)mwait_size);
				break;
			}

			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
			cpi->cpi_mwait.mon_max = mwait_size;
			if (MWAIT_EXTENSION(cpi)) {
				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
				if (MWAIT_INT_ENABLE(cpi))
					cpi->cpi_mwait.support |=
					    MWAIT_ECX_INT_ENABLE;
			}
			break;
		}
		default:
			break;
		}
	}
	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
		struct cpuid_regs regs;

		cp = &regs;
		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);

		/*
		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint32_t x2apic_id;
			uint_t coreid_shift = 0;
			uint_t ncpu_per_core = 1;
			uint_t chipid_shift = 0;
			uint_t ncpu_per_chip = 1;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					x2apic_id = cp->cp_edx;
					coreid_shift = BITX(cp->cp_eax, 4, 0);
					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
				} else if (level == 2) {
					x2apic_id = cp->cp_edx;
					chipid_shift = BITX(cp->cp_eax, 4, 0);
					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
				}
			}

			cpi->cpi_apicid = x2apic_id;
			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
			cpi->cpi_ncore_per_chip = ncpu_per_chip /
			    ncpu_per_core;
			cpi->cpi_chipid = x2apic_id >> chipid_shift;
			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
			cpi->cpi_coreid = x2apic_id >> coreid_shift;
			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
		}

		/* Make cp NULL so that we don't stumble on others */
		cp = NULL;
	}
	/*
	 * XSAVE enumeration
	 */
	if (cpi->cpi_maxeax >= 0xD) {
		struct cpuid_regs regs;
		boolean_t cpuid_d_valid = B_TRUE;

		cp = &regs;
		cp->cp_eax = 0xD;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;

		(void) __cpuid_insn(cp);

		/*
		 * Sanity checks for debug
		 */
		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
		    (cp->cp_eax & XFEATURE_SSE) == 0) {
			cpuid_d_valid = B_FALSE;
		}

		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;

		/*
		 * If the hw supports AVX, get the size and offset in the save
		 * area for the ymm state.
		 */
		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
			cp->cp_eax = 0xD;
			cp->cp_ecx = 2;
			cp->cp_edx = cp->cp_ebx = 0;

			(void) __cpuid_insn(cp);

			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
				cpuid_d_valid = B_FALSE;
			}

			cpi->cpi_xsave.ymm_size = cp->cp_eax;
			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
		}

		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
			xsave_state_size = 0;
		} else if (cpuid_d_valid) {
			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
		} else {
			/* Broken CPUID 0xD, probably in HVM */
			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
			    ", ymm_size = %d, ymm_offset = %d\n",
			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
			    cpi->cpi_xsave.xsav_hw_features_high,
			    (int)cpi->cpi_xsave.xsav_max_size,
			    (int)cpi->cpi_xsave.ymm_size,
			    (int)cpi->cpi_xsave.ymm_offset);

			if (xsave_state_size != 0) {
				/*
				 * This must be a non-boot CPU. We cannot
				 * continue, because boot cpu has already
				 * enabled XSAVE.
				 */
				ASSERT(cpu->cpu_id != 0);
				cmn_err(CE_PANIC, "cpu%d: we have already "
				    "enabled XSAVE on boot cpu, cannot "
				    "continue.", cpu->cpu_id);
			} else {
				/*
				 * If we reached here on the boot CPU, it's also
				 * almost certain that we'll reach here on the
				 * non-boot CPUs. When we're here on a boot CPU
				 * we should disable the feature, on a non-boot
				 * CPU we need to confirm that we have.
				 */
				if (cpu->cpu_id == 0) {
					remove_x86_feature(x86_featureset,
					    X86FSET_XSAVE);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX);
					remove_x86_feature(x86_featureset,
					    X86FSET_F16C);
					remove_x86_feature(x86_featureset,
					    X86FSET_BMI1);
					remove_x86_feature(x86_featureset,
					    X86FSET_BMI2);
					remove_x86_feature(x86_featureset,
					    X86FSET_FMA);
					remove_x86_feature(x86_featureset,
					    X86FSET_AVX2);
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_XSAVE;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_AVX;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_F16C;
					CPI_FEATURES_ECX(cpi) &=
					    ~CPUID_INTC_ECX_FMA;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI1;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_BMI2;
					CPI_FEATURES_7_0_EBX(cpi) &=
					    ~CPUID_INTC_EBX_7_0_AVX2;
					xsave_force_disable = B_TRUE;
				} else {
					VERIFY(is_x86_feature(x86_featureset,
					    X86FSET_XSAVE) == B_FALSE);
				}
			}
		}
	}
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
		goto pass2_done;

	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
		nmax = NMAX_CPI_EXTD;
	/*
	 * Copy the extended properties, fixing them as we go.
	 * (We already handled n == 0 and n == 1 in pass 1)
	 */
	iptr = (void *)cpi->cpi_brandstr;
	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
		cp->cp_eax = 0x80000000 + n;
		(void) __cpuid_insn(cp);
		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
		switch (n) {
		case 2:
		case 3:
		case 4:
			/*
			 * Extract the brand string
			 */
			*iptr++ = cp->cp_eax;
			*iptr++ = cp->cp_ebx;
			*iptr++ = cp->cp_ecx;
			*iptr++ = cp->cp_edx;
			break;
		case 5:
			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * parts to report the sizes of the
				 * TLB for large pages. Before then,
				 * we don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    (cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1))
					cp->cp_eax = 0;
				break;
			default:
				break;
			}
			break;
		case 6:
			switch (cpi->cpi_vendor) {
			case X86_VENDOR_AMD:
				/*
				 * The Athlon and Duron were the first
				 * AMD parts with L2 TLB's.
				 * Before then, don't trust the data.
				 */
				if (cpi->cpi_family < 6 ||
				    (cpi->cpi_family == 6 &&
				    cpi->cpi_model < 1))
					cp->cp_eax = cp->cp_ebx = 0;
				/*
				 * AMD Duron rev A0 reports L2
				 * cache size incorrectly as 1K
				 * when it is really 64K
				 */
				if (cpi->cpi_family == 6 &&
				    cpi->cpi_model == 3 &&
				    cpi->cpi_step == 0) {
					cp->cp_ecx &= 0xffff;
					cp->cp_ecx |= 0x400000;
				}
				break;
			case X86_VENDOR_Cyrix:	/* VIA C3 */
				/*
				 * VIA C3 processors are a bit messed
				 * up w.r.t. encoding cache sizes in %ecx
				 */
				if (cpi->cpi_family != 6)
					break;
				/*
				 * model 7 and 8 were incorrectly encoded
				 *
				 * xxx is model 8 really broken?
				 */
				if (cpi->cpi_model == 7 ||
				    cpi->cpi_model == 8)
					cp->cp_ecx =
					    BITX(cp->cp_ecx, 31, 24) << 16 |
					    BITX(cp->cp_ecx, 23, 16) << 12 |
					    BITX(cp->cp_ecx, 15, 8) << 8 |
					    BITX(cp->cp_ecx, 7, 0);
				/*
				 * model 9 stepping 1 has wrong associativity
				 */
				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
					cp->cp_ecx |= 8 << 12;
				break;
			case X86_VENDOR_Intel:
				/*
				 * Extended L2 Cache features function.
				 * First appeared on Prescott.
				 */
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

pass2_done:
	cpi->cpi_pass = 2;
}
static const char *
intel_cpubrand(const struct cpuid_info *cpi)
{
	int i;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
		return ("i486");

	switch (cpi->cpi_family) {
	case 5:
		return ("Intel Pentium(r)");
	case 6:
		switch (cpi->cpi_model) {
			uint_t celeron, xeon;
			const struct cpuid_regs *cp;
		case 0:
		case 1:
		case 2:
			return ("Intel Pentium(r) Pro");
		case 3:
		case 4:
			return ("Intel Pentium(r) II");
		case 6:
			return ("Intel Celeron(r)");
		case 5:
		case 7:
			celeron = xeon = 0;
			cp = &cpi->cpi_std[2];	/* cache info */

			for (i = 1; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 2; i++) {
				uint_t tmp;

				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			for (i = 0; i < 4; i++) {
				uint_t tmp;

				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
				if (tmp == 0x40)
					celeron++;
				else if (tmp >= 0x44 && tmp <= 0x45)
					xeon++;
			}

			if (celeron)
				return ("Intel Celeron(r)");
			if (xeon)
				return (cpi->cpi_model == 5 ?
				    "Intel Pentium(r) II Xeon(tm)" :
				    "Intel Pentium(r) III Xeon(tm)");
			return (cpi->cpi_model == 5 ?
			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
		default:
			break;
		}
	default:
		break;
	}

	/* BrandID is present if the field is nonzero */
	if (cpi->cpi_brandid != 0) {
		static const struct {
			uint_t bt_bid;
			const char *bt_str;
		} brand_tbl[] = {
			{ 0x1,	"Intel(r) Celeron(r)" },
			{ 0x2,	"Intel(r) Pentium(r) III" },
			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
			{ 0x4,	"Intel(r) Pentium(r) III" },
			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
			{ 0x8,	"Intel(r) Pentium(r) 4" },
			{ 0x9,	"Intel(r) Pentium(r) 4" },
			{ 0xa,	"Intel(r) Celeron(r)" },
			{ 0xb,	"Intel(r) Xeon(tm)" },
			{ 0xc,	"Intel(r) Xeon(tm) MP" },
			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
			{ 0x11, "Mobile Genuine Intel(r)" },
			{ 0x12, "Intel(r) Celeron(r) M" },
			{ 0x13, "Mobile Intel(r) Celeron(r)" },
			{ 0x14, "Intel(r) Celeron(r)" },
			{ 0x15, "Mobile Genuine Intel(r)" },
			{ 0x16, "Intel(r) Pentium(r) M" },
			{ 0x17, "Mobile Intel(r) Celeron(r)" }
		};
		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
		uint_t sgn;

		sgn = (cpi->cpi_family << 8) |
		    (cpi->cpi_model << 4) | cpi->cpi_step;
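		/*
		 * For example, family 0xf, model 1, step 3 packs to
		 * sgn == 0xf13, so the "sgn < 0xf13" tests below match
		 * every part older than that signature.
		 */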
		for (i = 0; i < btblmax; i++)
			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
				break;
		if (i < btblmax) {
			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
				return ("Intel(r) Celeron(r)");
			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
				return ("Intel(r) Xeon(tm) MP");
			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
				return ("Intel(r) Xeon(tm)");
			return (brand_tbl[i].bt_str);
		}
	}

	return (NULL);
}
static const char *
amd_cpubrand(const struct cpuid_info *cpi)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
		return ("i486 compatible");

	switch (cpi->cpi_family) {
	case 5:
		switch (cpi->cpi_model) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
			return ("AMD-K5(r)");
		case 6:
		case 7:
			return ("AMD-K6(r)");
		case 8:
			return ("AMD-K6(r)-2");
		case 9:
			return ("AMD-K6(r)-III");
		default:
			return ("AMD (family 5)");
		}
	case 6:
		switch (cpi->cpi_model) {
		case 1:
			return ("AMD-K7(tm)");
		case 0:
		case 2:
		case 4:
			return ("AMD Athlon(tm)");
		case 3:
		case 7:
			return ("AMD Duron(tm)");
		case 6:
		case 8:
		case 10:
			/*
			 * Use the L2 cache size to distinguish
			 */
			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
			    "AMD Athlon(tm)" : "AMD Duron(tm)");
		default:
			return ("AMD (family 6)");
		}
	default:
		break;
	}

	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
	    cpi->cpi_brandid != 0) {
		switch (BITX(cpi->cpi_brandid, 7, 5)) {
		case 0x4:
			return ("AMD Opteron(tm) UP 1xx");
		case 0x5:
			return ("AMD Opteron(tm) DP 2xx");
		case 0x6:
			return ("AMD Opteron(tm) MP 8xx");
		default:
			return ("AMD Opteron(tm)");
		}
	}

	return (NULL);
}
static const char *
cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
{
	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
	    type == X86_TYPE_CYRIX_486)
		return ("i486 compatible");

	switch (type) {
	case X86_TYPE_CYRIX_6x86:
		return ("Cyrix 6x86");
	case X86_TYPE_CYRIX_6x86L:
		return ("Cyrix 6x86L");
	case X86_TYPE_CYRIX_6x86MX:
		return ("Cyrix 6x86MX");
	case X86_TYPE_CYRIX_GXm:
		return ("Cyrix GXm");
	case X86_TYPE_CYRIX_MediaGX:
		return ("Cyrix MediaGX");
	case X86_TYPE_CYRIX_MII:
		return ("Cyrix M2");
	case X86_TYPE_VIA_CYRIX_III:
		return ("VIA Cyrix M3");
	default:
		/*
		 * Have another wild guess ..
		 */
		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
			return ("Cyrix 5x86");
		else if (cpi->cpi_family == 5) {
			switch (cpi->cpi_model) {
			case 2:
				return ("Cyrix 6x86");	/* Cyrix M1 */
			case 4:
				return ("Cyrix MediaGX");
			default:
				break;
			}
		} else if (cpi->cpi_family == 6) {
			switch (cpi->cpi_model) {
			case 0:
				return ("Cyrix 6x86MX"); /* Cyrix M2? */
			default:
				break;
			}
		}
		break;
	}

	return (NULL);
}
/*
 * This only gets called in the case that the CPU extended
 * feature brand strings (0x80000002, 0x80000003, 0x80000004)
 * aren't available, or contain null bytes for some reason.
 */
static void
fabricate_brandstr(struct cpuid_info *cpi)
{
	const char *brand = NULL;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		brand = intel_cpubrand(cpi);
		break;
	case X86_VENDOR_AMD:
		brand = amd_cpubrand(cpi);
		break;
	case X86_VENDOR_Cyrix:
		brand = cyrix_cpubrand(cpi, x86_type);
		break;
	case X86_VENDOR_NexGen:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "NexGen Nx586";
		break;
	case X86_VENDOR_Centaur:
		if (cpi->cpi_family == 5)
			switch (cpi->cpi_model) {
			case 4:
				brand = "Centaur C6";
				break;
			case 8:
				brand = "Centaur C2";
				break;
			case 9:
				brand = "Centaur C3";
				break;
			default:
				break;
			}
		break;
	case X86_VENDOR_Rise:
		if (cpi->cpi_family == 5 &&
		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
			brand = "Rise mP6";
		break;
	case X86_VENDOR_SiS:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
			brand = "SiS 55x";
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
			brand = "Transmeta Crusoe TM3x00 or TM5x00";
		break;
	case X86_VENDOR_NSC:
	case X86_VENDOR_UMC:
	default:
		break;
	}
	if (brand) {
		(void) strcpy((char *)cpi->cpi_brandstr, brand);
		return;
	}

	/*
	 * If all else fails ...
	 */
	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
	    cpi->cpi_model, cpi->cpi_step);
}
/*
 * This routine is called just after kernel memory allocation
 * becomes available on cpu0, and as part of mp_startup() on
 * the other cpus.
 *
 * Fixup the brand string, and collect any information from cpuid
 * that requires dynamically allocated storage to represent.
 */
void
cpuid_pass3(cpu_t *cpu)
{
	int	i, max, shft, level, size;
	struct cpuid_regs regs;
	struct cpuid_regs *cp;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 2);

	/*
	 * Function 4: Deterministic cache parameters
	 *
	 * Take this opportunity to detect the number of threads
	 * sharing the last level cache, and construct a corresponding
	 * cache id. The respective cpuid_info members are initialized
	 * to the default case of "no last level cache sharing".
	 */
	cpi->cpi_ncpu_shr_last_cache = 1;
	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;

	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {

		/*
		 * Find the # of elements (size) returned by fn 4, and along
		 * the way detect last level cache sharing details.
		 */
		bzero(&regs, sizeof (regs));
		cp = &regs;
		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
			cp->cp_eax = 4;
			cp->cp_ecx = i;

			(void) __cpuid_insn(cp);

			if (CPI_CACHE_TYPE(cp) == 0)
				break;
			level = CPI_CACHE_LVL(cp);
			if (level > max) {
				max = level;
				cpi->cpi_ncpu_shr_last_cache =
				    CPI_NTHR_SHR_CACHE(cp) + 1;
			}
		}
		cpi->cpi_std_4_size = size = i;

		/*
		 * Allocate the cpi_std_4 array. The first element
		 * references the regs for fn 4, %ecx == 0, which
		 * cpuid_pass2() stashed in cpi->cpi_std[4].
		 */
		if (size > 0) {
			cpi->cpi_std_4 =
			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
			cpi->cpi_std_4[0] = &cpi->cpi_std[4];

			/*
			 * Allocate storage to hold the additional regs
			 * for function 4, %ecx == 1 .. cpi_std_4_size.
			 *
			 * The regs for fn 4, %ecx == 0 has already
			 * been allocated as indicated above.
			 */
			for (i = 1; i < size; i++) {
				cp = cpi->cpi_std_4[i] =
				    kmem_zalloc(sizeof (regs), KM_SLEEP);
				cp->cp_eax = 4;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
			}
		}
		/*
		 * Determine the number of bits needed to represent
		 * the number of CPUs sharing the last level cache.
		 *
		 * Shift off that number of bits from the APIC id to
		 * derive the cache id.
		 */
		shft = 0;
		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
			shft++;
		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
	}
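	/*
	 * For example, with 8 CPUs sharing the last level cache, the loop
	 * above computes shft == 3, so CPUs whose APIC ids differ only in
	 * their low 3 bits end up with the same last level cache id.
	 */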
	/*
	 * Now fixup the brand string
	 */
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
		fabricate_brandstr(cpi);
	} else {

		/*
		 * If we successfully extracted a brand string from the cpuid
		 * instruction, clean it up by removing leading spaces and
		 * trailing nulls.
		 */
		if (cpi->cpi_brandstr[0]) {
			size_t maxlen = sizeof (cpi->cpi_brandstr);
			char *src, *dst;

			dst = src = (char *)cpi->cpi_brandstr;
			src[maxlen - 1] = '\0';
			/*
			 * strip leading spaces
			 */
			while (*src == ' ')
				src++;
			/*
			 * Remove any 'Genuine' or "Authentic" prefixes
			 */
			if (strncmp(src, "Genuine ", 8) == 0)
				src += 8;
			if (strncmp(src, "Authentic ", 10) == 0)
				src += 10;

			/*
			 * Now do an in-place copy.
			 * Map (R) to (r) and (TM) to (tm).
			 * The era of teletypes is long gone, and there's
			 * -really- no need to shout.
			 */
			while (*src != '\0') {
				if (src[0] == '(') {
					if (strncmp(src + 1, "R)", 2) == 0) {
						(void) strncpy(dst, "(r)", 3);
						src += 3;
						dst += 3;
						continue;
					}
					if (strncmp(src + 1, "TM)", 3) == 0) {
						(void) strncpy(dst, "(tm)", 4);
						src += 4;
						dst += 4;
						continue;
					}
				}
				*dst++ = *src++;
			}
			*dst = '\0';

			/*
			 * Finally, remove any trailing spaces
			 */
			while (--dst > cpi->cpi_brandstr)
				if (*dst == ' ')
					*dst = '\0';
				else
					break;
		} else
			fabricate_brandstr(cpi);
	}

	cpi->cpi_pass = 3;
}
/*
 * This routine is called out of bind_hwcap() much later in the life
 * of the kernel (post_startup()).  The job of this routine is to resolve
 * the hardware feature support and kernel support for those features into
 * what we're actually going to tell applications via the aux vector.
 */
void
cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
{
	struct cpuid_info *cpi;
	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass == 3);

	if (cpi->cpi_maxeax >= 1) {
		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
		uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES];

		*edx = CPI_FEATURES_EDX(cpi);
		*ecx = CPI_FEATURES_ECX(cpi);
		*ebx = CPI_FEATURES_7_0_EBX(cpi);

		/*
		 * [these require explicit kernel support]
		 */
		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
			*edx &= ~CPUID_INTC_EDX_SEP;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
			*edx &= ~CPUID_INTC_EDX_SSE2;

		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
			*edx &= ~CPUID_INTC_EDX_HTT;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
			*ecx &= ~CPUID_INTC_ECX_SSE3;

		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
			*ecx &= ~CPUID_INTC_ECX_SSSE3;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
		if (!is_x86_feature(x86_featureset, X86FSET_AES))
			*ecx &= ~CPUID_INTC_ECX_AES;
		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
			    CPUID_INTC_ECX_OSXSAVE);
		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
			*ecx &= ~CPUID_INTC_ECX_AVX;
		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
			*ecx &= ~CPUID_INTC_ECX_F16C;
		if (!is_x86_feature(x86_featureset, X86FSET_FMA))
			*ecx &= ~CPUID_INTC_ECX_FMA;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI1))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI1;
		if (!is_x86_feature(x86_featureset, X86FSET_BMI2))
			*ebx &= ~CPUID_INTC_EBX_7_0_BMI2;
		if (!is_x86_feature(x86_featureset, X86FSET_AVX2))
			*ebx &= ~CPUID_INTC_EBX_7_0_AVX2;
		if (!is_x86_feature(x86_featureset, X86FSET_RDSEED))
			*ebx &= ~CPUID_INTC_EBX_7_0_RDSEED;
		if (!is_x86_feature(x86_featureset, X86FSET_ADX))
			*ebx &= ~CPUID_INTC_EBX_7_0_ADX;

		/*
		 * [no explicit support required beyond x87 fp context]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);

		/*
		 * Now map the supported feature vector to things that we
		 * think userland will care about.
		 */
		if (*edx & CPUID_INTC_EDX_SEP)
			hwcap_flags |= AV_386_SEP;
		if (*edx & CPUID_INTC_EDX_SSE)
			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
		if (*edx & CPUID_INTC_EDX_SSE2)
			hwcap_flags |= AV_386_SSE2;
		if (*ecx & CPUID_INTC_ECX_SSE3)
			hwcap_flags |= AV_386_SSE3;
		if (*ecx & CPUID_INTC_ECX_SSSE3)
			hwcap_flags |= AV_386_SSSE3;
		if (*ecx & CPUID_INTC_ECX_SSE4_1)
			hwcap_flags |= AV_386_SSE4_1;
		if (*ecx & CPUID_INTC_ECX_SSE4_2)
			hwcap_flags |= AV_386_SSE4_2;
		if (*ecx & CPUID_INTC_ECX_MOVBE)
			hwcap_flags |= AV_386_MOVBE;
		if (*ecx & CPUID_INTC_ECX_AES)
			hwcap_flags |= AV_386_AES;
		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
			hwcap_flags |= AV_386_PCLMULQDQ;
		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
			hwcap_flags |= AV_386_XSAVE;

			if (*ecx & CPUID_INTC_ECX_AVX) {
				hwcap_flags |= AV_386_AVX;
				if (*ecx & CPUID_INTC_ECX_F16C)
					hwcap_flags_2 |= AV_386_2_F16C;
				if (*ecx & CPUID_INTC_ECX_FMA)
					hwcap_flags_2 |= AV_386_2_FMA;
				if (*ebx & CPUID_INTC_EBX_7_0_BMI1)
					hwcap_flags_2 |= AV_386_2_BMI1;
				if (*ebx & CPUID_INTC_EBX_7_0_BMI2)
					hwcap_flags_2 |= AV_386_2_BMI2;
				if (*ebx & CPUID_INTC_EBX_7_0_AVX2)
					hwcap_flags_2 |= AV_386_2_AVX2;
			}
		}
		if (*ecx & CPUID_INTC_ECX_VMX)
			hwcap_flags |= AV_386_VMX;
		if (*ecx & CPUID_INTC_ECX_POPCNT)
			hwcap_flags |= AV_386_POPCNT;
		if (*edx & CPUID_INTC_EDX_FPU)
			hwcap_flags |= AV_386_FPU;
		if (*edx & CPUID_INTC_EDX_MMX)
			hwcap_flags |= AV_386_MMX;

		if (*edx & CPUID_INTC_EDX_TSC)
			hwcap_flags |= AV_386_TSC;
		if (*edx & CPUID_INTC_EDX_CX8)
			hwcap_flags |= AV_386_CX8;
		if (*edx & CPUID_INTC_EDX_CMOV)
			hwcap_flags |= AV_386_CMOV;
		if (*ecx & CPUID_INTC_ECX_CX16)
			hwcap_flags |= AV_386_CX16;

		if (*ecx & CPUID_INTC_ECX_RDRAND)
			hwcap_flags_2 |= AV_386_2_RDRAND;
		if (*ebx & CPUID_INTC_EBX_7_0_ADX)
			hwcap_flags_2 |= AV_386_2_ADX;
		if (*ebx & CPUID_INTC_EBX_7_0_RDSEED)
			hwcap_flags_2 |= AV_386_2_RDSEED;
	}

	if (cpi->cpi_xmaxeax < 0x80000001)
		goto pass4_done;

	switch (cpi->cpi_vendor) {
		struct cpuid_regs cp;
		uint32_t *edx, *ecx;

	case X86_VENDOR_Intel:
		/*
		 * Seems like Intel duplicated what was necessary
		 * here to make the initial crop of 64-bit OS's work.
		 * Hopefully, those are the only "extended" bits
		 * they'll add.
		 */
		/*FALLTHROUGH*/

	case X86_VENDOR_AMD:
		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];

		*edx = CPI_FEATURES_XTD_EDX(cpi);
		*ecx = CPI_FEATURES_XTD_ECX(cpi);

		/*
		 * [these features require explicit kernel support]
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			break;

		case X86_VENDOR_AMD:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
				*ecx &= ~CPUID_AMD_ECX_SSE4A;
			break;

		default:
			break;
		}

		/*
		 * [no explicit support required beyond
		 * x87 fp context and exception handlers]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_AMD_EDX_MMXamd |
			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);

		if (!is_x86_feature(x86_featureset, X86FSET_NX))
			*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
		*edx &= ~CPUID_AMD_EDX_LM;
#endif
		/*
		 * Now map the supported feature vector to
		 * things that we think userland will care about.
		 */
#if defined(__amd64)
		if (*edx & CPUID_AMD_EDX_SYSC)
			hwcap_flags |= AV_386_AMD_SYSC;
#endif
		if (*edx & CPUID_AMD_EDX_MMXamd)
			hwcap_flags |= AV_386_AMD_MMX;
		if (*edx & CPUID_AMD_EDX_3DNow)
			hwcap_flags |= AV_386_AMD_3DNow;
		if (*edx & CPUID_AMD_EDX_3DNowx)
			hwcap_flags |= AV_386_AMD_3DNowx;
		if (*ecx & CPUID_AMD_ECX_SVM)
			hwcap_flags |= AV_386_AMD_SVM;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_AMD:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			if (*ecx & CPUID_AMD_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			if (*ecx & CPUID_AMD_ECX_SSE4A)
				hwcap_flags |= AV_386_AMD_SSE4A;
			if (*ecx & CPUID_AMD_ECX_LZCNT)
				hwcap_flags |= AV_386_AMD_LZCNT;
			break;

		case X86_VENDOR_Intel:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			/*
			 * Aarrgh.
			 * Intel uses a different bit in the same word.
			 */
			if (*ecx & CPUID_INTC_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			break;

		default:
			break;
		}
		break;

	case X86_VENDOR_TM:
		cp.cp_eax = 0x80860001;
		(void) __cpuid_insn(&cp);
		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
		break;

	default:
		break;
	}

pass4_done:
	cpi->cpi_pass = 4;
	if (hwcap_out != NULL) {
		hwcap_out[0] = hwcap_flags;
		hwcap_out[1] = hwcap_flags_2;
	}
}
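/*
 * Note: the two hwcap words assembled above are what bind_hwcap()
 * ultimately hands to userland through the aux vector, so a bit
 * cleared in this pass is invisible to applications even when the
 * underlying hardware implements the feature.
 */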
/*
 * Simulate the cpuid instruction using the data we previously
 * captured about this CPU. We try our best to return the truth
 * about the hardware, independently of kernel support.
 */
uint32_t
cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
{
	struct cpuid_info *cpi;
	struct cpuid_regs *xcp;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 3));

	/*
	 * CPUID data is cached in two separate places: cpi_std for standard
	 * CPUID functions, and cpi_extd for extended CPUID functions.
	 */
	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
		xcp = &cpi->cpi_std[cp->cp_eax];
	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
	else
		/*
		 * The caller is asking for data from an input parameter which
		 * the kernel has not cached.  In this case we go fetch from
		 * the hardware and return the data directly to the user.
		 */
		return (__cpuid_insn(cp));

	cp->cp_eax = xcp->cp_eax;
	cp->cp_ebx = xcp->cp_ebx;
	cp->cp_ecx = xcp->cp_ecx;
	cp->cp_edx = xcp->cp_edx;
	return (cp->cp_eax);
}
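/*
 * Illustrative use (see e.g. cpuid_arat_supported() below): set
 * regs.cp_eax to the function of interest and call
 * cpuid_insn(NULL, &regs); passing NULL for the cpu means "the
 * current CPU".
 */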
int
cpuid_checkpass(cpu_t *cpu, int pass)
{
	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
}

int
cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
{
	ASSERT(cpuid_checkpass(cpu, 3));

	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
}

int
cpuid_is_cmt(cpu_t *cpu)
{
	if (cpu == NULL)
		cpu = CPU;

	ASSERT(cpuid_checkpass(cpu, 1));

	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
}

/*
 * AMD and Intel both implement the 64-bit variant of the syscall
 * instruction (syscallq), so if there's -any- support for syscall,
 * cpuid currently says "yes, we support this".
 *
 * However, Intel decided to -not- implement the 32-bit variant of the
 * syscall instruction, so we provide a predicate to allow our caller
 * to test that subtlety here.
 *
 * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
 *	even in the case where the hardware would in fact support it.
 */
/*ARGSUSED*/
int
cpuid_syscall32_insn(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));

#if !defined(__xpv)
	if (cpu == NULL)
		cpu = CPU;

	/*CSTYLED*/
	{
		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
		    cpi->cpi_xmaxeax >= 0x80000001 &&
		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
			return (1);
	}
#endif
	return (0);
}

int
cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	static const char fmt[] =
	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
	static const char fmt_ht[] =
	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";

	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
		    cpi->cpi_family, cpi->cpi_model,
		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
	return (snprintf(s, n, fmt,
	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
	    cpi->cpi_family, cpi->cpi_model,
	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
}

const char *
cpuid_getvendorstr(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
}

uint_t
cpuid_getvendor(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
}

uint_t
cpuid_getfamily(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_family);
}

uint_t
cpuid_getmodel(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_model);
}

uint_t
cpuid_get_ncpu_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
}

uint_t
cpuid_get_ncore_per_chip(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
}

uint_t
cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
}

id_t
cpuid_get_last_lvl_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 2));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_getstep(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_step);
}

uint_t
cpuid_getsig(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
}

uint32_t
cpuid_getchiprev(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
}

const char *
cpuid_getchiprevstr(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
}

uint32_t
cpuid_getsockettype(struct cpu *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
}

const char *
cpuid_getsocketstr(cpu_t *cpu)
{
	static const char *socketstr = NULL;
	struct cpuid_info *cpi;

	ASSERT(cpuid_checkpass(cpu, 1));
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Assume that socket types are the same across the system */
	if (socketstr == NULL)
		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
		    cpi->cpi_model, cpi->cpi_step);

	return (socketstr);
}

int
cpuid_get_chipid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));

	if (cpuid_is_cmt(cpu))
		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
	return (cpu->cpu_id);
}

id_t
cpuid_get_coreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
}

int
cpuid_get_pkgcoreid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
}

int
cpuid_get_clogid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
}

int
cpuid_get_cacheid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
}

uint_t
cpuid_get_procnodeid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
}

uint_t
cpuid_get_procnodes_per_pkg(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
}

uint_t
cpuid_get_compunitid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
}

uint_t
cpuid_get_cores_per_compunit(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
}

/*ARGSUSED*/
int
cpuid_have_cr8access(cpu_t *cpu)
{
#if defined(__amd64)
	return (1);
#else
	struct cpuid_info *cpi;

	ASSERT(cpu != NULL);
	cpi = cpu->cpu_m.mcpu_cpi;
	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
		return (1);
	return (0);
#endif
}

uint32_t
cpuid_get_apicid(cpu_t *cpu)
{
	ASSERT(cpuid_checkpass(cpu, 1));
	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
		return (UINT32_MAX);
	}
	return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
}

void
cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
{
	struct cpuid_info *cpi;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	if (pabits)
		*pabits = cpi->cpi_pabits;
	if (vabits)
		*vabits = cpi->cpi_vabits;
}
/*
 * Returns the number of data TLB entries for a corresponding
 * pagesize.  If it can't be computed, or isn't known, the
 * routine returns zero.  If you ask about an architecturally
 * impossible pagesize, the routine will panic (so that the
 * hat implementor knows that things are inconsistent.)
 */
uint_t
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	/*
	 * Check the L2 TLB info
	 */
	if (cpi->cpi_xmaxeax >= 0x80000006) {
		struct cpuid_regs *cp = &cpi->cpi_extd[6];

		switch (pagesize) {

		case 4 * 1024:
			/*
			 * All zero in the top 16 bits of the register
			 * indicates a unified TLB. Size is in low 16 bits.
			 */
			if ((cp->cp_ebx & 0xffff0000) == 0)
				dtlb_nent = cp->cp_ebx & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
			break;

		case 2 * 1024 * 1024:
			if ((cp->cp_eax & 0xffff0000) == 0)
				dtlb_nent = cp->cp_eax & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_eax, 27, 16);
			break;

		default:
			panic("unknown L2 pagesize");
			/*NOTREACHED*/
		}
	}

	if (dtlb_nent != 0)
		return (dtlb_nent);

	/*
	 * No L2 TLB support for this size, try L1.
	 */
	if (cpi->cpi_xmaxeax >= 0x80000005) {
		struct cpuid_regs *cp = &cpi->cpi_extd[5];

		switch (pagesize) {
		case 4 * 1024:
			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
			break;
		case 2 * 1024 * 1024:
			dtlb_nent = BITX(cp->cp_eax, 23, 16);
			break;
		default:
			panic("unknown L1 d-TLB pagesize");
			/*NOTREACHED*/
		}
	}

	return (dtlb_nent);
}
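/*
 * The pagesize argument above is in bytes, so callers ask about 4K
 * pages with 4 * 1024 and about 2M pages with 2 * 1024 * 1024; as the
 * switches show, any other size panics, by design.
 */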
/*
 * Return 0 if the erratum is not present or not applicable, positive
 * if it is, and negative if the status of the erratum is unknown.
 *
 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
 * Processors" #25759, Rev 3.57, August 2005
 */
int
cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	uint_t eax;

	/*
	 * Bail out if this CPU isn't an AMD CPU, or if it's
	 * a legacy (32-bit) AMD CPU.
	 */
	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
	    cpi->cpi_family == 6)
		return (0);

	eax = cpi->cpi_std[1].cp_eax;

#define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
#define	SH_B3(eax)	(eax == 0xf51)
#define	B(eax)		(SH_B0(eax) || SH_B3(eax))

#define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)

#define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
#define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
#define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
#define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))

#define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
#define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
#define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
#define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))

#define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
#define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
#define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
#define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
#define	BH_E4(eax)	(eax == 0x20fb1)
#define	SH_E5(eax)	(eax == 0x20f42)
#define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
#define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
#define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
			    DH_E6(eax) || JH_E6(eax))

#define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
#define	DR_B0(eax)	(eax == 0x100f20)
#define	DR_B1(eax)	(eax == 0x100f21)
#define	DR_BA(eax)	(eax == 0x100f2a)
#define	DR_B2(eax)	(eax == 0x100f22)
#define	DR_B3(eax)	(eax == 0x100f23)
#define	RB_C0(eax)	(eax == 0x100f40)
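/*
 * Note: eax below is the raw cpuid function 1 signature, so these
 * macros match specific silicon revisions; e.g. SH_C0 matches the
 * 0xf48 and 0xf58 signatures of SH-C0 parts.
 */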
	switch (erratum) {
	case 1:
		return (cpi->cpi_family < 0x10);
	case 51:	/* what does the asterisk mean? */
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 52:
		return (B(eax));
	case 57:
		return (cpi->cpi_family <= 0x11);
	case 58:
		return (B(eax));
	case 60:
		return (cpi->cpi_family <= 0x11);
	case 61:
	case 62:
	case 63:
	case 64:
	case 65:
	case 66:
	case 68:
	case 69:
	case 70:
	case 71:
		return (B(eax));
	case 72:
		return (SH_B0(eax));
	case 74:
		return (B(eax));
	case 75:
		return (cpi->cpi_family < 0x10);
	case 76:
		return (B(eax));
	case 77:
		return (cpi->cpi_family <= 0x11);
	case 78:
		return (B(eax) || SH_C0(eax));
	case 79:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 80:
	case 81:
	case 82:
		return (B(eax));
	case 83:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 85:
		return (cpi->cpi_family < 0x10);
	case 86:
		return (SH_C0(eax) || CG(eax));
	case 88:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 89:
		return (cpi->cpi_family < 0x10);
	case 90:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 91:
	case 92:
		return (B(eax) || SH_C0(eax));
	case 93:
		return (SH_C0(eax));
	case 94:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 95:
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 96:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 97:
	case 98:
		return (SH_C0(eax) || CG(eax));
	case 99:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 100:
		return (B(eax) || SH_C0(eax));
	case 101:
	case 103:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 104:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 105:
	case 106:
	case 107:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 108:
		return (DH_CG(eax));
	case 109:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 110:
		return (D0(eax) || EX(eax));
	case 111:
		return (CG(eax));
	case 112:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 113:
		return (eax == 0x20fc0);
	case 114:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 115:
		return (SH_E0(eax) || JH_E1(eax));
	case 116:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 117:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 118:
		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
		    JH_E6(eax));
	case 121:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 122:
		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
	case 123:
		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
	case 131:
		return (cpi->cpi_family < 0x10);
	case 6336786:
		/*
		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
		 * if this is a K8 family or newer processor
		 */
		if (CPI_FAMILY(cpi) == 0xf) {
			struct cpuid_regs regs;
			regs.cp_eax = 0x80000007;
			(void) __cpuid_insn(&regs);
			return (!(regs.cp_edx & 0x100));
		}
		return (0);
	case 6323525:
		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
	case 6671130:
		/*
		 * check for processors (pre-Shanghai) that do not provide
		 * optimal management of 1gb ptes in its tlb.
		 */
		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
	case 298:
		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
		    DR_B2(eax) || RB_C0(eax));
	case 721:
#if defined(__amd64)
		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
#else
		return (0);
#endif
	default:
		return (-1);
	}
}
/*
 * Determine if specified erratum is present via OSVW (OS Visible Workaround).
 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
 */
int
osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi;
	uint_t osvwid;
	static int osvwfeature = -1;
	uint64_t osvwlength;


	cpi = cpu->cpu_m.mcpu_cpi;

	/* confirm OSVW supported */
	if (osvwfeature == -1) {
		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
	} else {
		/* assert that osvw feature setting is consistent on all cpus */
		ASSERT(osvwfeature ==
		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
	}
	if (!osvwfeature)
		return (-1);

	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;

	switch (erratum) {
	case 298:	/* osvwid is 0 */
		osvwid = 0;
		if (osvwlength <= (uint64_t)osvwid) {
			/* osvwid 0 is unknown */
			return (-1);
		}

		/*
		 * Check the OSVW STATUS MSR to determine the state
		 * of the erratum where:
		 *   0 - fixed by HW
		 *   1 - BIOS has applied the workaround when BIOS
		 *   workaround is available. (Or for other errata,
		 *   OS workaround is required.)
		 * For a value of 1, caller will confirm that the
		 * erratum 298 workaround has indeed been applied by BIOS.
		 *
		 * A 1 may be set in cpus that have a HW fix
		 * in a mixed cpu system. Regarding erratum 298:
		 *   In a multiprocessor platform, the workaround above
		 *   should be applied to all processors regardless of
		 *   silicon revision when an affected processor is
		 *   present.
		 */

		return (rdmsr(MSR_AMD_OSVW_STATUS +
		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));

	default:
		return (-1);
	}
}
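/*
 * The arithmetic above packs OSVW_ID_CNT_PER_MSR status bits into each
 * OSVW status MSR; osvwid 0, for example, is bit 0 of the first status
 * MSR. The divide selects the MSR and the modulo picks the bit.
 */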
static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";

static void
add_cache_prop(dev_info_t *devi, const char *label, const char *type,
    uint32_t val)
{
	char buf[128];

	/*
	 * ndi_prop_update_int() is used because it is desirable for
	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
	 */
	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
}
/*
 * Intel-style cache/tlb description
 *
 * Standard cpuid level 2 gives a randomly ordered
 * selection of tags that index into a table that describes
 * cache and tlb properties.
 */

static const char l1_icache_str[] = "l1-icache";
static const char l1_dcache_str[] = "l1-dcache";
static const char l2_cache_str[] = "l2-cache";
static const char l3_cache_str[] = "l3-cache";
static const char itlb4k_str[] = "itlb-4K";
static const char dtlb4k_str[] = "dtlb-4K";
static const char itlb2M_str[] = "itlb-2M";
static const char itlb4M_str[] = "itlb-4M";
static const char dtlb4M_str[] = "dtlb-4M";
static const char dtlb24_str[] = "dtlb0-2M-4M";
static const char itlb424_str[] = "itlb-4K-2M-4M";
static const char itlb24_str[] = "itlb-2M-4M";
static const char dtlb44_str[] = "dtlb-4K-4M";
static const char sl1_dcache_str[] = "sectored-l1-dcache";
static const char sl2_cache_str[] = "sectored-l2-cache";
static const char itrace_str[] = "itrace-cache";
static const char sl3_cache_str[] = "sectored-l3-cache";
static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";

static const struct cachetab {
	uint8_t		ct_code;
	uint8_t		ct_assoc;
	uint16_t	ct_line_size;
	size_t		ct_size;
	const char	*ct_label;
} intel_ctab[] = {
	/*
	 * maintain descending order!
	 *
	 * Codes ignored - Reason
	 * ----------------------
	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
	 * f0H/f1H - Currently we do not interpret prefetch size by design
	 */
	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
	{ 0xc0, 4, 0, 8, dtlb44_str },
	{ 0xba, 4, 0, 64, dtlb4k_str },
	{ 0xb4, 4, 0, 256, dtlb4k_str },
	{ 0xb3, 4, 0, 128, dtlb4k_str },
	{ 0xb2, 4, 0, 64, itlb4k_str },
	{ 0xb0, 4, 0, 128, itlb4k_str },
	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
	{ 0x86, 4, 64, 512*1024, l2_cache_str},
	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
	{ 0x83, 8, 32, 512*1024, l2_cache_str},
	{ 0x82, 8, 32, 256*1024, l2_cache_str},
	{ 0x80, 8, 64, 512*1024, l2_cache_str},
	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
	{ 0x73, 8, 0, 64*1024, itrace_str},
	{ 0x72, 8, 0, 32*1024, itrace_str},
	{ 0x71, 8, 0, 16*1024, itrace_str},
	{ 0x70, 8, 0, 12*1024, itrace_str},
	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
	{ 0x5d, 0, 0, 256, dtlb44_str},
	{ 0x5c, 0, 0, 128, dtlb44_str},
	{ 0x5b, 0, 0, 64, dtlb44_str},
	{ 0x5a, 4, 0, 32, dtlb24_str},
	{ 0x59, 0, 0, 16, dtlb4k_str},
	{ 0x57, 4, 0, 16, dtlb4k_str},
	{ 0x56, 4, 0, 16, dtlb4M_str},
	{ 0x55, 0, 0, 7, itlb24_str},
	{ 0x52, 0, 0, 256, itlb424_str},
	{ 0x51, 0, 0, 128, itlb424_str},
	{ 0x50, 0, 0, 64, itlb424_str},
	{ 0x4f, 0, 0, 32, itlb4k_str},
	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
	{ 0x43, 4, 32, 512*1024, l2_cache_str},
	{ 0x42, 4, 32, 256*1024, l2_cache_str},
	{ 0x41, 4, 32, 128*1024, l2_cache_str},
	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
	{ 0x30, 8, 64, 32*1024, l1_icache_str},
	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0b, 4, 0, 4, itlb4M_str},
	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
	{ 0x08, 4, 32, 16*1024, l1_icache_str},
	{ 0x06, 4, 32, 8*1024, l1_icache_str},
	{ 0x05, 4, 0, 32, dtlb4M_str},
	{ 0x04, 4, 0, 8, dtlb4M_str},
	{ 0x03, 4, 0, 64, dtlb4k_str},
	{ 0x02, 4, 0, 2, itlb4M_str},
	{ 0x01, 4, 0, 32, itlb4k_str},
	{ 0 }
};

static const struct cachetab cyrix_ctab[] = {
	{ 0x70, 4, 0, 32, "tlb-4K" },
	{ 0x80, 4, 16, 16*1024, "l1-cache" },
	{ 0 }
};
/*
 * Search a cache table for a matching entry
 */
static const struct cachetab *
find_cacheent(const struct cachetab *ct, uint_t code)
{
	if (code != 0) {
		for (; ct->ct_code != 0; ct++)
			if (ct->ct_code <= code)
				break;
		if (ct->ct_code == code)
			return (ct);
	}
	return (NULL);
}
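/*
 * Since the loop above stops at the first entry whose code is <= the
 * one sought, the tables it searches must stay sorted in descending
 * order (hence the "maintain descending order!" warning on intel_ctab);
 * an out-of-order entry would be skipped silently.
 */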
/*
 * Populate cachetab entry with L2 or L3 cache-information using
 * cpuid function 4. This function is called from intel_walk_cacheinfo()
 * when descriptor 0x49 is encountered. It returns 0 if no such cache
 * information is found.
 */
static int
intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
{
	uint32_t level, i;
	int ret = 0;

	for (i = 0; i < cpi->cpi_std_4_size; i++) {
		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);

		if (level == 2 || level == 3) {
			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
			ct->ct_line_size =
			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
			ct->ct_size = ct->ct_assoc *
			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
			    ct->ct_line_size *
			    (cpi->cpi_std_4[i]->cp_ecx + 1);

			if (level == 2) {
				ct->ct_label = l2_cache_str;
			} else if (level == 3) {
				ct->ct_label = l3_cache_str;
			}
			ret = 1;
		}
	}

	return (ret);
}
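/*
 * The size computed above is the usual leaf-4 formula of ways *
 * partitions * line size * sets; e.g. 8 ways, 1 partition, 64-byte
 * lines and 8192 sets describe a 4MB cache.
 */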
/*
 * Walk the cacheinfo descriptor, applying 'func' to every valid element
 * The walk is terminated if the walker returns non-zero.
 */
static void
intel_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	struct cachetab des_49_ct, des_b1_ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * For overloaded descriptor 0x49 we use cpuid function 4
		 * if supported by the current processor, to create
		 * cache information.
		 * For overloaded descriptor 0xb1 we use X86_PAE flag
		 * to disambiguate the cache information.
		 */
		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
			ct = &des_49_ct;
		} else if (*dp == 0xb1) {
			des_b1_ct.ct_code = 0xb1;
			des_b1_ct.ct_assoc = 4;
			des_b1_ct.ct_line_size = 0;
			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
				des_b1_ct.ct_size = 8;
				des_b1_ct.ct_label = itlb2M_str;
			} else {
				des_b1_ct.ct_size = 4;
				des_b1_ct.ct_label = itlb4M_str;
			}
			ct = &des_b1_ct;
		} else {
			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
				continue;
			}
		}

		if (func(arg, ct) != 0) {
			break;
		}
	}
}
/*
 * (Like the Intel one, except for Cyrix CPUs)
 */
static void
cyrix_walk_cacheinfo(struct cpuid_info *cpi,
    void *arg, int (*func)(void *, const struct cachetab *))
{
	const struct cachetab *ct;
	uint8_t *dp;
	int i;

	if ((dp = cpi->cpi_cacheinfo) == NULL)
		return;
	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
		/*
		 * Search Cyrix-specific descriptor table first ..
		 */
		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}
		/*
		 * .. else fall back to the Intel one
		 */
		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
			if (func(arg, ct) != 0)
				break;
			continue;
		}
	}
}
/*
 * A cacheinfo walker that adds associativity, line-size, and size properties
 * to the devinfo node it is passed as an argument.
 */
static int
add_cacheent_props(void *arg, const struct cachetab *ct)
{
	dev_info_t *devi = arg;

	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
	if (ct->ct_line_size != 0)
		add_cache_prop(devi, ct->ct_label, line_str,
		    ct->ct_line_size);
	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
	return (0);
}
static const char fully_assoc[] = "fully-associative?";

/*
 * AMD style cache/tlb description
 *
 * Extended functions 5 and 6 directly describe properties of
 * tlbs and various cache levels.
 */
static void
add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* reserved; ignore */
		break;
	default:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 0xff:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	}
}

static void
add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0)
		return;
	add_cache_prop(devi, label, size_str, size);
	add_amd_assoc(devi, label, assoc);
}

static void
add_amd_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || line_size == 0)
		return;
	add_amd_assoc(devi, label, assoc);
	/*
	 * Most AMD parts have a sectored cache. Multiple cache lines are
	 * associated with each tag. A sector consists of all cache lines
	 * associated with a tag. For example, the AMD K6-III has a sector
	 * size of 2 cache lines per tag.
	 */
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}

static void
add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
{
	switch (assoc) {
	case 0:	/* off */
		break;
	case 1:
	case 2:
	case 4:
		add_cache_prop(devi, label, assoc_str, assoc);
		break;
	case 6:
		add_cache_prop(devi, label, assoc_str, 8);
		break;
	case 8:
		add_cache_prop(devi, label, assoc_str, 16);
		break;
	case 0xf:
		add_cache_prop(devi, label, fully_assoc, 1);
		break;
	default: /* reserved; ignore */
		break;
	}
}

static void
add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
{
	if (size == 0 || assoc == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	add_cache_prop(devi, label, size_str, size);
}

static void
add_amd_l2_cache(dev_info_t *devi, const char *label,
    uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
{
	if (size == 0 || assoc == 0 || line_size == 0)
		return;
	add_amd_l2_assoc(devi, label, assoc);
	if (lines_per_tag != 0)
		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
	add_cache_prop(devi, label, line_str, line_size);
	add_cache_prop(devi, label, size_str, size * 1024);
}
static void
amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
{
	struct cpuid_regs *cp;

	if (cpi->cpi_xmaxeax < 0x80000005)
		return;
	cp = &cpi->cpi_extd[5];

	/*
	 * 4M/2M L1 TLB configuration
	 *
	 * We report the size for 2M pages because AMD uses two
	 * TLB entries for one 4M page.
	 */
	add_amd_tlb(devi, "dtlb-2M",
	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
	add_amd_tlb(devi, "itlb-2M",
	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));

	/*
	 * 4K L1 TLB configuration
	 */

	switch (cpi->cpi_vendor) {
		uint_t nentries;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5) {
			/*
			 * Crusoe processors have 256 TLB entries, but
			 * cpuid data format constrains them to only
			 * reporting 255 of them.
			 */
			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
				nentries = 256;
			/*
			 * Crusoe processors also have a unified TLB
			 */
			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
			    nentries);
			break;
		}
		/*FALLTHROUGH*/
	default:
		add_amd_tlb(devi, itlb4k_str,
		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
		add_amd_tlb(devi, dtlb4k_str,
		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
		break;
	}

	/*
	 * data L1 cache configuration
	 */

	add_amd_cache(devi, l1_dcache_str,
	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));

	/*
	 * code L1 cache configuration
	 */

	add_amd_cache(devi, l1_icache_str,
	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	/* Check for a unified L2 TLB for large pages */

	if (BITX(cp->cp_eax, 31, 16) == 0)
		add_amd_l2_tlb(devi, "l2-tlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	else {
		add_amd_l2_tlb(devi, "l2-dtlb-2M",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-2M",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	/* Check for a unified L2 TLB for 4K pages */

	if (BITX(cp->cp_ebx, 31, 16) == 0) {
		add_amd_l2_tlb(devi, "l2-tlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	} else {
		add_amd_l2_tlb(devi, "l2-dtlb-4K",
		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
		add_amd_l2_tlb(devi, "l2-itlb-4K",
		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
	}

	add_amd_l2_cache(devi, l2_cache_str,
	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
}
/*
 * There are two basic ways that the x86 world describes its cache
 * and tlb architecture - Intel's way and AMD's way.
 *
 * Return which flavor of cache architecture we should use
 */
static int
x86_which_cacheinfo(struct cpuid_info *cpi)
{
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	case X86_VENDOR_AMD:
		/*
		 * The K5 model 1 was the first part from AMD that reported
		 * cache sizes via extended cpuid functions.
		 */
		if (cpi->cpi_family > 5 ||
		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
			return (X86_VENDOR_AMD);
		break;
	case X86_VENDOR_TM:
		if (cpi->cpi_family >= 5)
			return (X86_VENDOR_AMD);
		/*FALLTHROUGH*/
	default:
		/*
		 * If they have extended CPU data for 0x80000005
		 * then we assume they have AMD-format cache
		 * information.
		 *
		 * If not, and the vendor happens to be Cyrix,
		 * then try our Cyrix-specific handler.
		 *
		 * If we're not Cyrix, then assume we're using Intel's
		 * table-driven format instead.
		 */
		if (cpi->cpi_xmaxeax >= 0x80000005)
			return (X86_VENDOR_AMD);
		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
			return (X86_VENDOR_Cyrix);
		else if (cpi->cpi_maxeax >= 2)
			return (X86_VENDOR_Intel);
		break;
	}
	return (-1);
}
void
cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
    struct cpuid_info *cpi)
{
	dev_info_t *cpu_devi;
	int create;

	cpu_devi = (dev_info_t *)dip;

	/* device_type */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "device_type", "cpu");

	/* reg */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "reg", cpu_id);

	/* cpu-mhz, and clock-frequency */
	if (cpu_freq > 0) {
		long long mul;

		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpu-mhz", cpu_freq);
		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clock-frequency", (int)mul);
	}

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
		return;
	}

	/* vendor-id */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "vendor-id", cpi->cpi_vendorstr);

	if (cpi->cpi_maxeax == 0) {
		return;
	}

	/*
	 * family, model, and step
	 */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "family", CPI_FAMILY(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpu-model", CPI_MODEL(cpi));
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "stepping-id", CPI_STEP(cpi));

	/* type */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = 1;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "type", CPI_TYPE(cpi));

	/* ext-family */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-family", CPI_FAMILY_XTD(cpi));

	/* ext-model */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_EXTENDED_MODEL_INTEL(cpi);
		break;
	case X86_VENDOR_AMD:
		create = CPI_FAMILY(cpi) == 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-model", CPI_MODEL_XTD(cpi));

	/* generation */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD K5 model 1 was the first part to support this
		 */
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));

	/* brand-id */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * brand id first appeared on Pentium III Xeon model 8,
		 * and Celeron model 8 processors and Opteron
		 */
		create = cpi->cpi_family > 6 ||
		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create && cpi->cpi_brandid != 0) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "brand-id", cpi->cpi_brandid);
	}

	/* chunks, and apic-id */
	switch (cpi->cpi_vendor) {
		/*
		 * first available on Pentium IV and Opteron (K8)
		 */
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "chunks", CPI_CHUNKS(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "apic-id", cpi->cpi_apicid);
		if (cpi->cpi_chipid >= 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "chip#", cpi->cpi_chipid);
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
			    "clog#", cpi->cpi_clogid);
		}
	}

	/* cpuid-features */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
	    "cpuid-features", CPI_FEATURES_EDX(cpi));


	/* cpuid-features-ecx */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
		break;
	case X86_VENDOR_AMD:
		create = cpi->cpi_family >= 0xf;
		break;
	default:
		create = 0;
		break;
	}
	if (create)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));

	/* ext-cpuid-features */
	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
	case X86_VENDOR_Cyrix:
	case X86_VENDOR_TM:
	case X86_VENDOR_Centaur:
		create = cpi->cpi_xmaxeax >= 0x80000001;
		break;
	default:
		create = 0;
		break;
	}
	if (create) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
	}

	/*
	 * Brand String first appeared in Intel Pentium IV, AMD K5
	 * model 1, and Cyrix GXm.  On earlier models we try and
	 * simulate something similar .. so this string should always
	 * say -something- about the processor, however lame.
	 */
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
	    "brand-string", cpi->cpi_brandstr);

	/*
	 * Finally, cache and tlb information
	 */
	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
		break;
	case X86_VENDOR_AMD:
		amd_cache_info(cpi, cpu_devi);
		break;
	default:
		break;
	}
}

struct l2info {
	int *l2i_csz;
	int *l2i_lsz;
	int *l2i_assoc;
	int l2i_ret;
};
/*
 * A cacheinfo walker that fetches the size, line-size and associativity
 * of the l2 cache
 */
static int
intel_l2cinfo(void *arg, const struct cachetab *ct)
{
	struct l2info *l2i = arg;
	int *ip;

	if (ct->ct_label != l2_cache_str &&
	    ct->ct_label != sl2_cache_str)
		return (0);	/* not an L2 -- keep walking */

	if ((ip = l2i->l2i_csz) != NULL)
		*ip = ct->ct_size;
	if ((ip = l2i->l2i_lsz) != NULL)
		*ip = ct->ct_line_size;
	if ((ip = l2i->l2i_assoc) != NULL)
		*ip = ct->ct_assoc;
	l2i->l2i_ret = ct->ct_size;
	return (1);		/* was an L2 -- terminate walk */
}
/*
 * AMD L2/L3 Cache and TLB Associativity Field Definition:
 *
 * Unlike the associativity for the L1 cache and tlb where the 8 bit
 * value is the associativity, the associativity for the L2 cache and
 * tlb is encoded in the following table. The 4 bit L2 value serves as
 * an index into the amd_afd[] array to determine the associativity.
 * -1 is undefined. 0 is fully associative.
 */

static int amd_afd[] =
	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
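/*
 * For example, an encoded associativity field of 6 indexes amd_afd[6]
 * == 8, i.e. 8-way set associative, and a field of 0xf indexes the
 * final 0, meaning fully associative.
 */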
static void
amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
{
	struct cpuid_regs *cp;
	uint_t size, assoc;
	int i;
	int *ip;

	if (cpi->cpi_xmaxeax < 0x80000006)
		return;
	cp = &cpi->cpi_extd[6];

	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
		uint_t cachesz = size * 1024;
		assoc = amd_afd[i];

		ASSERT(assoc != -1);

		if ((ip = l2i->l2i_csz) != NULL)
			*ip = cachesz;
		if ((ip = l2i->l2i_lsz) != NULL)
			*ip = BITX(cp->cp_ecx, 7, 0);
		if ((ip = l2i->l2i_assoc) != NULL)
			*ip = assoc;
		l2i->l2i_ret = cachesz;
	}
}
int
getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct l2info __l2info, *l2i = &__l2info;

	l2i->l2i_csz = csz;
	l2i->l2i_lsz = lsz;
	l2i->l2i_assoc = assoc;
	l2i->l2i_ret = -1;

	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_AMD:
		amd_l2cacheinfo(cpi, l2i);
		break;
	default:
		break;
	}
	return (l2i->l2i_ret);
}
void
cpuid_mwait_alloc(cpu_t *cpu)
{
	uint32_t	*buf;
	uint32_t	*mwait;
	size_t		size;

	ASSERT(cpuid_checkpass(CPU, 2));

	size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
	if (size == 0)
		return;

	/*
	 * kmem_alloc() returns cache line size aligned data for size
	 * allocations.  size is currently cache line sized.  Neither of
	 * these implementation details are guaranteed to be true in the
	 * future.
	 *
	 * First try allocating size as kmem_alloc() currently returns
	 * correctly aligned memory.  If kmem_alloc() does not return
	 * size aligned memory, then use size ROUNDUP.
	 *
	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
	 * decide to free this memory.
	 */
	buf = kmem_zalloc(size, KM_SLEEP);
	if (buf == (uint32_t *)P2ROUNDUP((uintptr_t)buf, size)) {
		mwait = buf;
	} else {
		kmem_free(buf, size);
		buf = kmem_zalloc(size * 2, KM_SLEEP);

		mwait = (uint32_t *)P2ROUNDUP((uintptr_t)buf, size);
		size *= 2;
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = buf;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = size;

	*mwait = MWAIT_RUNNING;

	cpu->cpu_m.mcpu_mwait = mwait;
}
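/*
 * The P2ROUNDUP() test above relies on size being a power of two: a
 * buffer is suitably aligned exactly when rounding its address up to
 * the next size-byte boundary leaves it unchanged.
 */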
void
cpuid_mwait_free(cpu_t *cpu)
{
	if (cpu->cpu_m.mcpu_cpi == NULL) {
		return;
	}

	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
}
void
patch_tsc_read(int flag)
{
	size_t cnt;

	switch (flag) {
	case TSC_NONE:
		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
		break;
	case TSC_RDTSC_MFENCE:
		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_mfence_start, cnt);
		break;
	case TSC_RDTSC_LFENCE:
		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_lfence_start, cnt);
		break;
	case TSC_TSCP:
		cnt = &_tscp_end - &_tscp_start;
		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
		break;
	default:
		/* Bail for unexpected TSC types. (TSC_NONE covers 0) */
		cmn_err(CE_PANIC, "Unrecognized TSC type: %d", flag);
		break;
	}
	tsc_type = flag;
}
int
cpuid_deep_cstates_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));

	cpi = CPU->cpu_m.mcpu_cpi;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
		return (0);

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_xmaxeax < 0x80000007)
			return (0);

		/*
		 * TSC run at a constant rate in all ACPI C-states?
		 */
		regs.cp_eax = 0x80000007;
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);

	default:
		return (0);
	}
}
void
post_startup_cpu_fixups(void)
{
#ifndef	__xpv
	/*
	 * Some AMD processors support C1E state. Entering this state will
	 * cause the local APIC timer to stop, which we can't deal with at
	 * this time.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
#endif	/* !__xpv */
}

/*
 * Setup necessary registers to enable XSAVE feature on this processor.
 * This function needs to be called early enough, so that no xsave/xrstor
 * ops will execute on the processor before the MSRs are properly set up.
 *
 * Current implementation has the following assumptions:
 * - cpuid_pass1() is done, so that X86 features are known.
 * - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);

	/*
	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
	 * the right values.
	 */
	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
	setup_xfem();
}
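
/*
 * Editor's note: setting CR4.OSXSAVE is what legalizes the xgetbv/xsetbv
 * instructions (and thus XCR0 programming), and the hardware reflects it
 * back as CPUID.1:ECX.OSXSAVE -- hence the matching update of the cached
 * cpi_std[1] ECX value above.
 */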

/*
 * Starting with the Westmere processor the local
 * APIC timer will continue running in all C-states,
 * including the deepest C-states.
 */
int
cpuid_arat_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	cpi = CPU->cpu_m.mcpu_cpi;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * Always-running Local APIC Timer is
		 * indicated by CPUID.6.EAX[2].
		 */
		if (cpi->cpi_maxeax >= 6) {
			regs.cp_eax = 6;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_eax & CPUID_CSTATE_ARAT);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}

/*
 * Check support for Intel ENERGY_PERF_BIAS feature
 */
int
cpuid_iepb_supported(struct cpu *cp)
{
	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(cp, 1));

	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
		return (0);
	}

	/*
	 * Intel ENERGY_PERF_BIAS MSR is indicated by
	 * capability bit CPUID.6.ECX.3
	 */
	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
		return (0);

	regs.cp_eax = 0x6;
	(void) cpuid_insn(NULL, &regs);
	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
}
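
/*
 * Editor's note: when this check succeeds, the IA32_ENERGY_PERF_BIAS MSR is
 * available; it takes a 4-bit policy hint ranging from 0 (maximum
 * performance) to 15 (maximum energy saving) that the OS may program.
 */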

/*
 * Check support for TSC deadline timer
 *
 * The TSC deadline timer provides a superior software programming model
 * over the local APIC timer, one that eliminates "time drifts".  Instead
 * of specifying a relative time, software specifies an absolute time as
 * the target at which the processor should generate a timer event.
 */
int
cpuid_deadline_tsc_supported(void)
{
	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 1) {
			regs.cp_eax = 1;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}
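
/*
 * Illustrative sketch (editor's note, not from the original source): with
 * the deadline timer, arming a one-shot event is a single write of an
 * absolute TSC value to the IA32_TSC_DEADLINE MSR (0x6e0 per the Intel SDM;
 * the symbolic name below is hypothetical), e.g.
 *
 *	wrmsr(MSR_TSC_DEADLINE, rdtsc() + delta);
 *
 * rather than programming a relative count into the local APIC
 * initial-count register and compensating for the time already elapsed.
 */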

#if defined(__amd64) && !defined(__xpv)
/*
 * Patch in versions of bcopy for high performance Intel Nehalem (Nhm)
 * processors and later...
 */
void
patch_memops(uint_t vendor)
{
	size_t cnt, i;
	caddr_t to, from;

	if ((vendor == X86_VENDOR_Intel) &&
	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
		cnt = &bcopy_patch_end - &bcopy_patch_start;
		to = &bcopy_ck_size;
		from = &bcopy_patch_start;
		for (i = 0; i < cnt; i++) {
			*to++ = *from++;
		}
	}
}
#endif	/* __amd64 && !__xpv */
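
/*
 * Editor's note: the loop above hot-patches the kernel's own text, copying
 * the pre-built SSE4.2-aware byte sequence between bcopy_patch_start and
 * bcopy_patch_end over bcopy_ck_size.  This presumably runs early enough in
 * boot that the routine being patched is not concurrently executing.
 */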

/*
 * This function finds the number of bits needed to represent the number of
 * cores per chip and the number of strands per core for Intel platforms.
 * It re-uses the x2APIC cpuid code of cpuid_pass2().
 */
void
cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
{
	struct cpuid_regs regs;
	struct cpuid_regs *cp = &regs;

	if (vendor != X86_VENDOR_Intel) {
		return;
	}

	/* if the cpuid level is 0xB, extended topo is available. */
	cp->cp_eax = 0;
	if (__cpuid_insn(cp) >= 0xB) {

		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
		(void) __cpuid_insn(cp);

		/*
		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint_t coreid_shift = 0;
			uint_t chipid_shift = 0;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					/*
					 * Thread level processor topology.
					 * Number of bits to shift the APIC ID
					 * right to get the coreid.
					 */
					coreid_shift = BITX(cp->cp_eax, 4, 0);
				} else if (level == 2) {
					/*
					 * Core level processor topology.
					 * Number of bits to shift the APIC ID
					 * right to get the chipid.
					 */
					chipid_shift = BITX(cp->cp_eax, 4, 0);
				}
			}

			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
				*strand_nbits = coreid_shift;
				*core_nbits = chipid_shift - coreid_shift;
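				/*
				 * Worked example (editor's note): a part with
				 * 2 strands per core and 16 cores per chip
				 * typically reports coreid_shift == 1 and
				 * chipid_shift == 5 in leaf 0xB, giving
				 * *strand_nbits == 1 and *core_nbits == 4.
				 */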