x86: mtrr: add CONFIG_CACHE_ROM support
[coreboot.git] / src/cpu/x86/mtrr/mtrr.c
/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 *
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Copyright 2000 Silicon Integrated System Corporation
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)

static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
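
/* Re-read the variable MTRR count from the MTRRcap MSR. Per the Intel SDM,
 * its low 8 bits (VCNT) report how many variable-range MTRRs the CPU
 * implements. */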
static void detect_var_mtrrs(void)
{
	msr_t msr;

	msr = rdmsr(MTRRcap_MSR);

	total_mtrrs = msr.lo & 0xff;
	bios_mtrrs = total_mtrrs - OS_MTRRS;
}

void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
	wrmsr(MTRRdefType_MSR, msr);
}

static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRRdefType_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRRdefTypeEn | deftype;
	wrmsr(MTRRdefType_MSR, msr);
}

/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $0,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}

/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $32,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r;
}
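
/* Example: fms(0x1800) == 12 and fls(0x1800) == 11. Note the zero-input
 * fallbacks differ: fms(0) returns 0 while fls(0) returns 32, so a base of
 * zero is treated as maximally aligned by the sizing logic further below. */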

#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
 * be done with 32-bit numbers. This allows for the MTRR code to handle
 * up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)

/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
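
/* Worked example: with RANGE_SHIFT == 12 a range unit is 4KiB, so
 * RANGE_1MB == 0x100 units, MTRR_MIN_ALIGN (64MiB) == 0x4000 units, and
 * RANGE_4GB == 1 << (32 - 12) == 0x100000 units. */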

static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time, remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		struct range_entry *r;
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);
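
		/* memranges matches a resource when its flags ANDed with
		 * mask equal match. Above, cacheable resources seed the
		 * write-back ranges and the non-cacheable remainder is
		 * overlaid as uncacheable. */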

		/* Handle any write combining resources. Only prefetchable
		 * resources with the IORESOURCE_WRCOMB flag are appropriate
		 * for this MTRR type. */
		match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
		mask |= match;
		memranges_add_resources(addr_space, mask, match,
					MTRR_TYPE_WRCOMB);

#if CONFIG_CACHE_ROM
		/* Add a write-protect region covering the ROM size
		 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
		 * to be located at 4GiB - ROM size. */
		resource_t rom_base = RANGE_TO_PHYS_ADDR(
			RANGE_4GB - PHYS_TO_RANGE_ADDR(CONFIG_ROM_SIZE));
		memranges_insert(addr_space, rom_base, CONFIG_ROM_SIZE,
				 MTRR_TYPE_WRPROT);
#endif

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
		memranges_each_entry(r, addr_space)
			printk(BIOS_DEBUG,
			       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
			       range_entry_base(r), range_entry_end(r),
			       range_entry_size(r), range_entry_tag(r));
	}

	return addr_space;
}

/* Fixed MTRR descriptor. This structure defines the step size and the begin
 * and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
 * It also describes the offset at which to store the calculated MTRR type
 * in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};
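
/* The descriptors above cover 8 x 64KiB, 16 x 16KiB and 64 x 4KiB ranges:
 * 88 in total (NUM_FIXED_RANGES), packed 8 per MSR into 1 + 2 + 8 == 11
 * fixed MSRs (NUM_FIXED_MTRRS). The range_index values (0, 8, 24) are the
 * offsets of each region's first range within fixed_mtrr_types[]. */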

static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
	type_index = desc->range_index;

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}

static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	disable_cache();

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
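		/* Each fixed-range MSR packs eight one-byte memory types:
		 * .lo carries ranges 0-3 and .hi carries ranges 4-7 of the
		 * eight consecutive fixed_mtrr_types[] entries consumed
		 * below. */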
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
		printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
		wrmsr(msr_index[i], fixed_msrs[i]);
	}

	enable_cache();
}

void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
	enable_fixed_mtrr();
}

/* Keep track of the MTRR that covers the ROM for caching purposes. */
#if CONFIG_CACHE_ROM
static long rom_cache_mtrr = -1;
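
/* The two helpers below toggle the reserved ROM MTRR between write-protect
 * and the default type. The write-protect memory type caches reads but
 * forces writes onto the bus, which suits a memory-mapped ROM: reads are
 * fast while writes are never absorbed by the cache. */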
void x86_mtrr_enable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	msr_val.lo |= MTRR_TYPE_WRPROT;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}

void x86_mtrr_disable_rom_caching(void)
{
	msr_t msr_val;
	unsigned long index;

	if (rom_cache_mtrr < 0)
		return;

	index = rom_cache_mtrr;
	disable_cache();
	msr_val = rdmsr(MTRRphysBase_MSR(index));
	msr_val.lo &= ~0xff;
	wrmsr(MTRRphysBase_MSR(index), msr_val);
	enable_cache();
}
#endif

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int commit_mtrrs;
	int mtrr_index;
	int def_mtrr_type;
};

static void clear_var_mtrr(int index)
{
	msr_t msr_val;

	msr_val = rdmsr(MTRRphysMask_MSR(index));
	msr_val.lo &= ~MTRRphysMaskValid;
	wrmsr(MTRRphysMask_MSR(index), msr_val);
}
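
/* Program one variable MTRR pair: PhysBase holds the base address with the
 * memory type in its low byte, and PhysMask holds ~(size - 1) truncated to
 * the CPU's physical address width, with the valid bit set. */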
static void write_var_mtrr(struct var_mtrr_state *var_state,
			   uint32_t base, uint32_t size, int mtrr_type)
{
	msr_t msr_val;
	unsigned long msr_index;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	/* An attempt is made to save some variable MTRRs for OS use.
	 * However, mapping the full address space properly takes
	 * priority. */
	if (var_state->mtrr_index >= bios_mtrrs)
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
		return;
	}

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;
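
	/* Since size is a power of 2, -rsize equals ~(rsize - 1), which is
	 * the MTRR mask. Example: a 256MiB range with 36 address bits gives
	 * (-0x10000000) & 0xfffffffff == 0xff0000000. */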

#if CONFIG_CACHE_ROM
	/* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
	 * one to turn on caching for faster ROM access. However, it is
	 * left to the MTRR callers to enable it. */
	if (mtrr_type == MTRR_TYPE_WRPROT) {
		mtrr_type = MTRR_TYPE_UNCACHEABLE;
		if (rom_cache_mtrr < 0)
			rom_cache_mtrr = var_state->mtrr_index;
	}
#endif

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	msr_val.lo = rbase;
	msr_val.lo |= mtrr_type;

	msr_val.hi = rbase >> 32;
	msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);

	msr_val.lo = rsize;
	msr_val.lo |= MTRRphysMaskValid;
	msr_val.hi = rsize >> 32;
	msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
	wrmsr(msr_index, msr_val);
}

static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint32_t base, uint32_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint32_t mtrr_size;

		addr_lsb = fls(base);
		size_msb = fms(size);

		/* All MTRR entries need to have their base aligned to the
		 * mask size. The largest chunk that can be carved off is
		 * therefore bounded by both the lowest set bit of the base
		 * and the highest set bit of the remaining size. */
		if (addr_lsb > size_msb)
			mtrr_size = 1 << size_msb;
		else
			mtrr_size = 1 << addr_lsb;
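
		/* Example: a 192MiB region at base 0 (0xc000 range units)
		 * is carved into a 128MiB MTRR (0x8000 units) followed by
		 * a 64MiB MTRR (0x4000 units) on the next iteration. */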
		if (var_state->commit_mtrrs)
			write_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}

static void setup_var_mtrrs_by_state(struct var_mtrr_state *var_state)
{
	struct range_entry *r;

	/*
	 * For each range that meets the non-default type, process it in the
	 * following manner:
	 * +------------------+ c2 = end
	 * | 0 or more bytes  |
	 * +------------------+ b2 = c1 = ALIGN_DOWN(end)
	 * |                  |
	 * +------------------+ b1 = a2 = ALIGN_UP(begin)
	 * | 0 or more bytes  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are 3 sub-ranges to configure variable MTRRs for.
	 */
	memranges_each_entry(r, var_state->addr_space) {
		uint32_t a1, a2, b1, b2, c1, c2;
		int mtrr_type = range_entry_tag(r);

		/* Skip default type. */
		if (var_state->def_mtrr_type == mtrr_type)
			continue;

		a1 = range_entry_base_mtrr_addr(r);
		c2 = range_entry_end_mtrr_addr(r);

		/* The end address is under 1MiB. The fixed MTRRs take
		 * precedence over the variable ones. Therefore this range
		 * can be ignored. */
		if (c2 < RANGE_1MB)
			continue;

		/* Again, the fixed MTRRs take precedence so the beginning
		 * of the range can be set to 0 if it starts below 1MiB. */
		if (a1 < RANGE_1MB)
			a1 = 0;

		/* If the range starts above 4GiB the processing is done. */
		if (!var_state->above4gb && a1 >= RANGE_4GB)
			break;

		/* Clip the upper address to 4GiB if addresses above 4GiB
		 * are not being processed. */
		if (!var_state->above4gb && c2 > RANGE_4GB)
			c2 = RANGE_4GB;

		/* Don't align up or down on the range if it is smaller
		 * than the minimum granularity. */
		if ((c2 - a1) < MTRR_MIN_ALIGN) {
			calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
			continue;
		}

		b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
		b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);

		calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
		calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
		calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
	}
}

static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it were the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.commit_mtrrs = 0;

	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
	setup_var_mtrrs_by_state(&var_state);
	wb_deftype_count = var_state.mtrr_index;

	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
	setup_var_mtrrs_by_state(&var_state);
	uc_deftype_count = var_state.mtrr_index;

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);
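
	/* Prefer whichever default type consumes fewer variable MTRRs;
	 * ties go to uncacheable as the safer default. */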
	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}

static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
			     int above4gb, int address_bits)
{
	struct var_mtrr_state var_state;
	int i;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Write the MSRs. */
	var_state.commit_mtrrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	setup_var_mtrrs_by_state(&var_state);

	/* Clear all remaining variable MTRRs. */
	for (i = var_state.mtrr_index; i < total_mtrrs; i++)
		clear_var_mtrr(i);
}
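
/* address_bits is the CPU's physical address width. above4gb selects how
 * ranges above 4GiB are handled: 0 ignores them, non-zero processes them,
 * and the special value 2 additionally re-reads the variable MTRR count
 * from MTRRcap instead of assuming the build-time default of 8. */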
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static int mtrr_default_type = -1;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (mtrr_default_type == -1) {
		if (above4gb == 2)
			detect_var_mtrrs();
		mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
	}

	disable_cache();
	commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
			 address_bits);
	enable_var_mtrr(mtrr_default_type);
	enable_cache();
}

void x86_setup_mtrrs(void)
{
	int address_size;
	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
	x86_setup_var_mtrrs(address_size, 1);
}

int x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(0x2ff);
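	/* 0x2ff is the MTRRdefType MSR. After the shift below, bit 0 is FE
	 * (fixed-range enable) and bit 1 is E, the global MTRR enable that
	 * is reported here under "Variable MTRRs". */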
	msr.lo >>= 10;

	printk(BIOS_DEBUG, "Fixed MTRRs : ");
	if (msr.lo & 0x01)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & 0x02)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");

	post_code(0x93);
	return ((int) msr.lo);
}