x86: fix get_mtrr() warning about smp_processor_id() with CONFIG_PREEMPT=y
[linux-2.6/mini2440.git] / arch/x86/kernel/cpu/mtrr/amd.c
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
	     unsigned long *size, mtrr_type *type)
{
	unsigned long low, high;

	rdmsr(MSR_K6_UWCCR, low, high);
	/* Upper dword is region 1, lower is region 0 */
	if (reg == 1)
		low = high;
	/* The base masks off on the right alignment */
	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	*type = 0;
	if (low & 1)
		*type = MTRR_TYPE_UNCACHABLE;
	if (low & 2)
		*type = MTRR_TYPE_WRCOMB;
	if (!(low & 3)) {
		*size = 0;
		return;
	}
	/*
	 * This needs a little explaining. The size is stored as an
	 * inverted mask of bits of 128K granularity, 15 bits long, offset
	 * 2 bits.
	 *
	 * So to get a size we invert the mask and add 1 to the lowest
	 * mask bit (4 as it's 2 bits in). This gives us a size we then
	 * shift to turn into 128K blocks.
	 *
	 * eg		111 1111 1111 1100	is 512K
	 *
	 * invert	000 0000 0000 0011
	 * +1		000 0000 0000 0100
	 * *128K	...
	 */
	low = (~low) & 0x1FFFC;
	*size = (low + 4) << (15 - PAGE_SHIFT);
	return;
}
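
/*
 * Worked example of the decode above (illustrative sketch only; the
 * helper name and register value are hypothetical, not part of this
 * driver).  With 4K pages, a 512K write-combining region at 0xE0000000
 * would be held as 0xE001FFF2: the base in bits 31:17, the inverted
 * 128K mask in bits 16:2, and the WC enable in bit 1.
 */
static void __maybe_unused amd_mtrr_decode_example(void)
{
	unsigned long low = 0xE001FFF2UL;
	unsigned long base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	unsigned long size = (((~low) & 0x1FFFC) + 4) << (15 - PAGE_SHIFT);

	/* base is 0xE0000 pages (0xE0000000), size is 0x80 pages (512K) */
	WARN_ON(base != 0xE0000 || size != 0x80);
}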
static void amd_set_mtrr(unsigned int reg, unsigned long base,
			 unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	u32 regs[2];

	/*
	 *  Low is MTRR0, High is MTRR1
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
	/*
	 *  Blank to disable
	 */
	if (size == 0)
		regs[reg] = 0;
	else
		/* Set the register to the base, the type (off by one) and an
		   inverted bitmask of the size. The size is the only odd
		   bit. We are fed say 512K. We invert this and we get
		   111 1111 1111 1011, but if you subtract one and invert
		   you get the desired 111 1111 1111 1100 mask.

		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
			| (base << PAGE_SHIFT) | (type + 1);

	/*
	 * The writeback rule is quite specific. See the manual. It is:
	 * disable local interrupts, write back the cache, set the MTRR.
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
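
/*
 * Illustrative check of the two's-complement trick above (sketch only;
 * the helper name is hypothetical).  For the 512K case in the comment,
 * size arrives as 0x80 4K pages; negating and shifting by
 * (15 - PAGE_SHIFT) leaves 0x0001FFF0 in bits 16:2, the inverted 128K
 * mask that amd_get_mtrr() decodes back to 0x80 pages.
 */
static void __maybe_unused amd_mtrr_encode_example(void)
{
	unsigned long size = 0x80;	/* 512K in 4K pages */
	unsigned long mask = (-size >> (15 - PAGE_SHIFT)) & 0x0001FFFC;

	WARN_ON(mask != 0x0001FFF0);
}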
static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/*
	 * Apply the K6 block alignment and size rules.
	 * In order:
	 * o Uncached or gathering only
	 * o 128K or bigger block
	 * o Power of 2 block
	 * o base suitably aligned to the power
	 */
	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
	    || (size & ~(size - 1)) - size || (base & (size - 1)))
		return -EINVAL;
	return 0;
}
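
/*
 * Illustrative use of the rules above (sketch only; the helper name and
 * values are hypothetical).  base and size are in 4K pages here: a 512K
 * power-of-two block on a 512K boundary passes, a 96K block is rejected
 * for being below 128K (and not a power of two), and a misaligned base
 * is rejected by the final check.
 */
static void __maybe_unused amd_validate_example(void)
{
	WARN_ON(amd_validate_add_page(0xE0000, 0x80, MTRR_TYPE_WRCOMB) != 0);
	WARN_ON(amd_validate_add_page(0xE0000, 0x18, MTRR_TYPE_WRCOMB) == 0);
	WARN_ON(amd_validate_add_page(0xE0010, 0x80, MTRR_TYPE_WRCOMB) == 0);
}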
static struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}

//arch_initcall(amd_mtrr_init);