x86: use CR0 defines.
[linux-2.6/zen-sources.git] / arch/x86/kernel/cpu/mtrr/generic.c
blob 55d31ff118fbd0824a08f9cf42eca1bbb95d3ca3
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one   64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two   16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight  4k MTRRs */
	{} /* terminator: set_fixed_ranges() stops at .ranges == 0 */
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

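/*
 * The fixed-range MTRRs describe the first 1MB of physical memory: one
 * 64K-granularity MSR for 0x00000-0x7FFFF, two 16K MSRs for 0x80000-0xBFFFF
 * and eight 4K MSRs for 0xC0000-0xFFFFF.  Each MSR packs eight one-byte
 * memory types, so the 11 MSRs read below fill all NUM_FIXED_RANGES (88)
 * entries of the fixed_ranges array.
 */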
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;
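
	/* MTRRcap: bits 7:0 give the number of variable ranges,
	   bit 8 reports whether fixed-range MTRRs are supported. */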
	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);
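
	/* MTRRdefType: bits 7:0 hold the default memory type, bit 10 the
	   fixed-range enable and bit 11 the global MTRR enable, hence the
	   two-bit "enabled" field below. */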
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
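		/* Hex digits needed for the physical address bits above bit 31
		   (e.g. one digit on a machine with 36 address bits). */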
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/*
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;
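
	/* K8_MTRRFIXRANGE_DRAM_ENABLE (MtrrFixDramEn) makes the RdMem/WrMem
	   attributes in the fixed MTRRs take effect; K8_MTRRFIXRANGE_DRAM_MODIFY
	   (MtrrFixDramModEn) must be set before software may change them. */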
	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * Checks and updates a fixed-range MTRR if it differs from the value it
 * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
 * \param msr MSR address of the MTRR which should be checked and updated
 * \param changed pointer which indicates whether the MTRR needed to be changed
 * \param msrwords pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC if no
    register is free.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;
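
	/* mask_lo now holds the mask in units of 4K pages, with the bits
	   above the CPU's physical address width forced to 1 by size_or_mask.
	   For illustration, with 36 physical address bits a 256MB range has
	   PhysMask 0xF.F0000000, giving mask_lo = 0xFFFF0000 and therefore
	   -mask_lo = 0x10000 pages = 256MB. */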
	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * Checks and updates the fixed-range MTRRs if they differ from the saved set
 * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;
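
	/* Compare only the architecturally meaningful bits: for PhysBase the
	   type field (7:0) and the base address (31:12), for PhysMask the
	   valid bit (11) and the mask (31:12); the high words are clipped to
	   the implemented physical address bits via size_and_mask. */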
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
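
/*
 * prepare_set()/post_set() follow the MTRR update procedure described in the
 * Intel SDM: enter no-fill cache mode (CD=1, NW=0), flush caches and TLBs and
 * disable the MTRRs before the change, then undo it all in reverse order.
 */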
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
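		/* base and size are in units of 4K pages: bits 31:12 of the
		   low MSR words plus the overflow into the high words (clipped
		   by size_and_mask).  Bits 7:0 of PhysBase carry the type and
		   bit 11 of PhysMask (0x800) is the valid bit. */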
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
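	/* For illustration: base = 0x100 pages (1MB), size = 0x40 pages
	   (256KB) gives last = 0x13f; the loop strips six trailing bits and
	   leaves lbase == last == 0x4, so the range is accepted. */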
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

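/* MTRRcap bit 10 reports whether the write-combining memory type is supported. */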
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};