/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif
#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
static void enable_K5_wt_alloc(void);
static void enable_K6_wt_alloc(void);
static void enable_K6_2_wt_alloc(void);
#endif

#ifdef I686_CPU
static void init_6x86MX(void);
static void init_ppro(void);
static void init_mendocino(void);
#endif

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int hw_clflush_disable = -1;

u_int	cyrix_did;		/* Device ID of Cyrix CPU */
#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();
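	/*
	 * Standard cache-reconfiguration sequence: set CD and NW in CR0
	 * and INVD so the cache is disabled and empty while the cache
	 * configuration MSRs below are rewritten, then clear CD/NW at the
	 * end to re-enable caching.
	 */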
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	intr_restore(saveintr);
}
/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	register_t saveintr;
	u_char	ccr0;

	saveintr = intr_disable();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	intr_restore(saveintr);
}
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}
/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	register_t saveintr;
	u_char	ccr2, ccr3, ccr4, pcr0;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);
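	/*
	 * Setting MAPEN0 in CCR3 exposes the extended Cyrix configuration
	 * registers (CCR4 and the PCRs); restoring the saved CCR3 below
	 * hides them again.
	 */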
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
#ifdef CPU_I486_ON_386
/*
 * There are i486 based upgrade products for i386 machines.
 * In this case, BIOS doesn't enable CPU cache.
 */
static void
init_i486_on_386(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
}
#endif
/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if the
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
#endif /* I486_CPU */
#ifdef I586_CPU
/*
 * Rise mP6
 */
static void
init_rise(void)
{

	/*
	 * The CMPXCHG8B instruction is always available but hidden.
	 */
	cpu_feature |= CPUID_CX8;
}

/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
	u_int regs[4];
	uint64_t fcr;

	fcr = rdmsr(0x0107);

	/*
	 * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
	 */
	fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
	fcr &= ~(1ULL << 11);

	/*
	 * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
	 */
	if (CPUID_TO_MODEL(cpu_id) >= 8)
		fcr |= (1 << 12) | (1 << 19) | (1 << 20);

	wrmsr(0x0107, fcr);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif
#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
static int ppro_apic_used = -1;

static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	if (ppro_apic_used != 1) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase &= ~APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 0;
	}
}

/*
 * If the local APIC is going to be used after being disabled above,
 * re-enable it and don't disable it in the future.
 */
void
ppro_reenable_apic(void)
{
	u_int64_t	apicbase;

	if (ppro_apic_used == 0) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase |= APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 1;
	}
}
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}
/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	uint64_t fcr;

	/*
	 * Explicitly enable CX8 and PGE on C3.
	 *
	 * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
	 */
	if (CPUID_TO_MODEL(cpu_id) <= 9)
		fcr = (1 << 1) | (1 << 7);
	else
		fcr = 0;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
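	/*
	 * regs[0] reports the highest supported Centaur CPUID leaf; leaf
	 * 0xc0000001 returns the PadLock feature flags in %edx.
	 */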
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		fcr |= 1 << 28;

	wrmsr(0x1107, rdmsr(0x1107) | fcr);
}

#endif /* I686_CPU */
#if defined(I586_CPU) || defined(I686_CPU)
static void
init_transmeta(void)
{
	u_int regs[4];

	/* Expose all hidden features. */
	wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif
extern int elf32_nxstack;

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
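			/*
			 * The checks below key off cpu_id bits 7-4, the
			 * model field within family 5 (by AMD's numbering,
			 * roughly: models 1-3 K5, 6-7 K6, 8 K6-2, 9 K6-III;
			 * stated here as background, not verified against
			 * every stepping).
			 */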
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
			    (cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as timecounter.
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#if defined(PAE) || defined(PAE_TABLES)
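		/*
		 * Under PAE page tables the no-execute bit (PG_NX) in a
		 * PTE only takes effect once EFER.NXE is set, so enable
		 * it here when the CPU advertises NX support.
		 */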
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
#if defined(CPU_ENABLE_SSE)
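	/*
	 * CR4_FXSR (OSFXSR) tells the CPU that the OS uses FXSAVE/FXRSTOR,
	 * and CR4_XMM (OSXMMEXCPT) that it handles unmasked SIMD FP
	 * exceptions; both must be set before SSE instructions can be used.
	 */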
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
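	/*
	 * E.g., a reported value of 8 corresponds to the common 64-byte
	 * cache line.
	 */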
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
static void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	saveintr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		saveintr = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* Disable write allocate. */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not.  Therefore, we disable write allocation for this
		 * range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* Enable write allocate. */
		intr_restore(saveintr);
	}
}
static void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
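	/*
	 * Maxmem is in 4K pages: >> 8 converts it to 1M units, and the
	 * (+3) >> 2 rounds up to 4M units.
	 */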
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
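	/*
	 * As the code below encodes it, WHCR (MSR 0xc0000082) on the K6
	 * holds the write-allocate limit in 4M units in bits 7-1, with
	 * bit 0 enabling write allocation for the 15-16M hole.
	 */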
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocation
		 * for the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocation for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}
static void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
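	/*
	 * On the K6-2 the same write-allocate limit lives in WHCR bits
	 * 31-22 (still in 4M units), and the 15-16M hole enable moved to
	 * bit 16, which is what the masks below encode.
	 */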
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocation
		 * for the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocation for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	register_t saveintr;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		saveintr = intr_disable();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		intr_restore(saveintr);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
		    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */