1 /* Generic MTRR (Memory Type Range Register) driver.
3 Copyright (C) 1997-1998 Richard Gooch
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
23 Source: "Pentium Pro Family Developer's Manual, Volume 3:
24 Operating System Writer's Guide" (Intel document number 242692),
29 Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
30 Initial register-setting code (from proform-1.0).
31 19971216 Richard Gooch <rgooch@atnf.csiro.au>
32 Original version for /proc/mtrr interface, SMP-safe.
34 19971217 Richard Gooch <rgooch@atnf.csiro.au>
35 Bug fix for ioctls()'s.
36 Added sample code in Documentation/mtrr.txt
38 19971218 Richard Gooch <rgooch@atnf.csiro.au>
39 Disallow overlapping regions.
40 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
41 Register-setting fixups.
43 19971222 Richard Gooch <rgooch@atnf.csiro.au>
44 Fixups for kernel 2.1.75.
46 19971229 David Wragg <dpw@doc.ic.ac.uk>
47 Register-setting fixups and conformity with Intel conventions.
48 19971229 Richard Gooch <rgooch@atnf.csiro.au>
49 Cosmetic changes and wrote this ChangeLog ;-)
50 19980106 Richard Gooch <rgooch@atnf.csiro.au>
51 Fixups for kernel 2.1.78.
53 19980119 David Wragg <dpw@doc.ic.ac.uk>
54 Included passive-release enable code (elsewhere in PCI setup).
56 19980131 Richard Gooch <rgooch@atnf.csiro.au>
57 Replaced global kernel lock with private spinlock.
59 19980201 Richard Gooch <rgooch@atnf.csiro.au>
60 Added wait for other CPUs to complete changes.
62 19980202 Richard Gooch <rgooch@atnf.csiro.au>
63 Bug fix in definition of <set_mtrr> for UP.
65 19980319 Richard Gooch <rgooch@atnf.csiro.au>
66 Fixups for kernel 2.1.90.
67 19980323 Richard Gooch <rgooch@atnf.csiro.au>
68 Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
70 19980325 Richard Gooch <rgooch@atnf.csiro.au>
71 Fixed test for overlapping regions: confused by adjacent regions
72 19980326 Richard Gooch <rgooch@atnf.csiro.au>
73 Added wbinvd in <set_mtrr_prepare>.
74 19980401 Richard Gooch <rgooch@atnf.csiro.au>
75 Bug fix for non-SMP compilation.
76 19980418 David Wragg <dpw@doc.ic.ac.uk>
77 Fixed-MTRR synchronisation for SMP and use atomic operations
79 19980418 Richard Gooch <rgooch@atnf.csiro.au>
80 Differentiate different MTRR register classes for BIOS fixup.
82 19980419 David Wragg <dpw@doc.ic.ac.uk>
83 Bug fix in variable MTRR synchronisation.
85 19980419 Richard Gooch <rgooch@atnf.csiro.au>
86 Fixups for kernel 2.1.97.
88 19980421 Richard Gooch <rgooch@atnf.csiro.au>
89 Safer synchronisation across CPUs when changing MTRRs.
91 19980423 Richard Gooch <rgooch@atnf.csiro.au>
92 Bugfix for SMP systems without MTRR support.
94 19980427 Richard Gooch <rgooch@atnf.csiro.au>
95 Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
97 19980427 Richard Gooch <rgooch@atnf.csiro.au>
98 Use atomic bitops for setting SMP change mask.
100 19980428 Richard Gooch <rgooch@atnf.csiro.au>
101 Removed spurious diagnostic message.
103 19980429 Richard Gooch <rgooch@atnf.csiro.au>
104 Moved register-setting macros into this file.
105 Moved setup code from init/main.c to i386-specific areas.
107 19980502 Richard Gooch <rgooch@atnf.csiro.au>
108 Moved MTRR detection outside conditionals in <mtrr_init>.
110 19980502 Richard Gooch <rgooch@atnf.csiro.au>
111 Documentation improvement: mention Pentium II and AGP.
113 19980521 Richard Gooch <rgooch@atnf.csiro.au>
114 Only manipulate interrupt enable flag on local CPU.
115 Allow enclosed uncachable regions.
117 19980611 Richard Gooch <rgooch@atnf.csiro.au>
118 Always define <main_lock>.
121 #include <linux/types.h>
122 #include <linux/errno.h>
123 #include <linux/sched.h>
124 #include <linux/tty.h>
125 #include <linux/timer.h>
126 #include <linux/config.h>
127 #include <linux/kernel.h>
128 #include <linux/wait.h>
129 #include <linux/string.h>
130 #include <linux/malloc.h>
131 #include <linux/ioport.h>
132 #include <linux/delay.h>
133 #include <linux/fs.h>
134 #include <linux/ctype.h>
135 #include <linux/proc_fs.h>
136 #include <linux/mm.h>
137 #include <linux/module.h>
138 #define MTRR_NEED_STRINGS
139 #include <asm/mtrr.h>
140 #include <linux/init.h>
141 #include <linux/smp.h>
143 #include <asm/uaccess.h>
145 #include <asm/processor.h>
146 #include <asm/system.h>
147 #include <asm/pgtable.h>
148 #include <asm/segment.h>
149 #include <asm/bitops.h>
150 #include <asm/atomic.h>
152 #define MTRR_VERSION "1.22 (19980611)"
157 #define MTRRcap_MSR 0x0fe
158 #define MTRRdefType_MSR 0x2ff
160 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
161 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
163 #define NUM_FIXED_RANGES 88
164 #define MTRRfix64K_00000_MSR 0x250
165 #define MTRRfix16K_80000_MSR 0x258
166 #define MTRRfix16K_A0000_MSR 0x259
167 #define MTRRfix4K_C0000_MSR 0x268
168 #define MTRRfix4K_C8000_MSR 0x269
169 #define MTRRfix4K_D0000_MSR 0x26a
170 #define MTRRfix4K_D8000_MSR 0x26b
171 #define MTRRfix4K_E0000_MSR 0x26c
172 #define MTRRfix4K_E8000_MSR 0x26d
173 #define MTRRfix4K_F0000_MSR 0x26e
174 #define MTRRfix4K_F8000_MSR 0x26f
177 # define MTRR_CHANGE_MASK_FIXED 0x01
178 # define MTRR_CHANGE_MASK_VARIABLE 0x02
179 # define MTRR_CHANGE_MASK_DEFTYPE 0x04
182 /* In the processor's MTRR interface, the MTRR type is always held in
184 typedef u8 mtrr_type
;
187 #define JIFFIE_TIMEOUT 100
190 # define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
192 # define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type,TRUE)
195 #ifndef CONFIG_PROC_FS
196 # define compute_ascii() while (0)
199 #ifdef CONFIG_PROC_FS
200 static char *ascii_buffer
= NULL
;
201 static unsigned int ascii_buf_bytes
= 0;
203 static unsigned int *usage_table
= NULL
;
204 static spinlock_t main_lock
= SPIN_LOCK_UNLOCKED
;
206 /* Private functions */
207 #ifdef CONFIG_PROC_FS
208 static void compute_ascii (void);
/*  CPU state saved across an MTRR update: the interrupt flag (EFLAGS),
    the MTRRdefType MSR pair and CR4.  Filled in by set_mtrr_prepare()
    and restored by set_mtrr_done().  */
struct set_mtrr_context
{
    unsigned long flags;       /* saved EFLAGS (interrupt enable state)  */
    unsigned long deftype_lo;  /* saved MTRRdefType MSR, low 32 bits     */
    unsigned long deftype_hi;  /* saved MTRRdefType MSR, high 32 bits    */
    unsigned long cr4val;      /* saved CR4 (Page Global Enable bit)     */
};
221 * Access to machine-specific registers (available on 586 and better only)
222 * Note: the rd* operations modify the parameters directly (without using
223 * pointer indirection), this allows gcc to optimize better
225 #define rdmsr(msr,val1,val2) \
226 __asm__ __volatile__("rdmsr" \
227 : "=a" (val1), "=d" (val2) \
230 #define wrmsr(msr,val1,val2) \
231 __asm__ __volatile__("wrmsr" \
233 : "c" (msr), "a" (val1), "d" (val2))
235 #define rdtsc(low,high) \
236 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
238 #define rdpmc(counter,low,high) \
239 __asm__ __volatile__("rdpmc" \
240 : "=a" (low), "=d" (high) \
244 /* Put the processor into a state where MTRRs can be safely set. */
245 static void set_mtrr_prepare(struct set_mtrr_context
*ctxt
)
249 /* disable interrupts locally */
250 __save_flags (ctxt
->flags
); __cli ();
252 /* save value of CR4 and clear Page Global Enable (bit 7) */
253 asm volatile ("movl %%cr4, %0\n\t"
255 "andb $0x7f, %b1\n\t"
257 : "=r" (ctxt
->cr4val
), "=q" (tmp
) : : "memory");
259 /* disable and flush caches. Note that wbinvd flushes the TLBs as
261 asm volatile ("movl %%cr0, %0\n\t"
262 "orl $0x40000000, %0\n\t"
266 : "=r" (tmp
) : : "memory");
268 /* disable MTRRs, and set the default type to uncached. */
269 rdmsr(MTRRdefType_MSR
, ctxt
->deftype_lo
, ctxt
->deftype_hi
);
270 wrmsr(MTRRdefType_MSR
, ctxt
->deftype_lo
& 0xf300UL
, ctxt
->deftype_hi
);
271 } /* End Function set_mtrr_prepare */
274 /* Restore the processor after a set_mtrr_prepare */
275 static void set_mtrr_done(struct set_mtrr_context
*ctxt
)
279 /* flush caches and TLBs */
280 asm volatile ("wbinvd" : : : "memory" );
282 /* restore MTRRdefType */
283 wrmsr(MTRRdefType_MSR
, ctxt
->deftype_lo
, ctxt
->deftype_hi
);
286 asm volatile ("movl %%cr0, %0\n\t"
287 "andl $0xbfffffff, %0\n\t"
289 : "=r" (tmp
) : : "memory");
291 /* restore value of CR4 */
292 asm volatile ("movl %0, %%cr4"
293 : : "r" (ctxt
->cr4val
) : "memory");
295 /* re-enable interrupts locally (if enabled previously) */
296 __restore_flags (ctxt
->flags
);
297 } /* End Function set_mtrr_done */
300 /* this function returns the number of variable MTRRs */
301 static unsigned int get_num_var_ranges (void)
303 unsigned long config
, dummy
;
305 rdmsr(MTRRcap_MSR
, config
, dummy
);
306 return (config
& 0xff);
307 } /* End Function get_num_var_ranges */
310 /* non-zero if we have the write-combining memory type. */
311 static int have_wrcomb (void)
313 unsigned long config
, dummy
;
315 rdmsr(MTRRcap_MSR
, config
, dummy
);
316 return (config
& (1<<10));
320 static void get_mtrr (unsigned int reg
, unsigned long *base
,
321 unsigned long *size
, mtrr_type
*type
)
323 unsigned long dummy
, mask_lo
, base_lo
;
325 rdmsr(MTRRphysMask_MSR(reg
), mask_lo
, dummy
);
326 if ((mask_lo
& 0x800) == 0) {
327 /* Invalid (i.e. free) range. */
334 rdmsr(MTRRphysBase_MSR(reg
), base_lo
, dummy
);
336 /* We ignore the extra address bits (32-35). If someone wants to
337 run x86 Linux on a machine with >4GB memory, this will be the
338 least of their problems. */
340 /* Clean up mask_lo so it gives the real address mask. */
341 mask_lo
= (mask_lo
& 0xfffff000UL
);
343 /* This works correctly if size is a power of two, i.e. a
345 *size
= ~(mask_lo
- 1);
347 *base
= (base_lo
& 0xfffff000UL
);
348 *type
= (base_lo
& 0xff);
349 } /* End Function get_mtrr */
352 static void set_mtrr_up (unsigned int reg
, unsigned long base
,
353 unsigned long size
, mtrr_type type
, int do_safe
)
354 /* [SUMMARY] Set variable MTRR register on the local CPU.
355 <reg> The register to set.
356 <base> The base address of the region.
357 <size> The size of the region. If this is 0 the region is disabled.
358 <type> The type of the region.
359 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
363 struct set_mtrr_context ctxt
;
365 if (do_safe
) set_mtrr_prepare (&ctxt
);
368 /* The invalid bit is kept in the mask, so we simply clear the
369 relevant mask register to disable a range. */
370 wrmsr (MTRRphysMask_MSR (reg
), 0, 0);
374 wrmsr (MTRRphysBase_MSR (reg
), base
| type
, 0);
375 wrmsr (MTRRphysMask_MSR (reg
), ~(size
- 1) | 0x800, 0);
377 if (do_safe
) set_mtrr_done (&ctxt
);
378 } /* End Function set_mtrr_up */
/*  Shadow copy of one variable-range MTRR: the PhysBase/PhysMask MSR
    pair, each split into its low and high 32-bit halves.  */
struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};
392 /* Get the MSR pair relating to a var range. */
393 __initfunc(static void get_mtrr_var_range (unsigned int index
,
394 struct mtrr_var_range
*vr
))
396 rdmsr (MTRRphysBase_MSR (index
), vr
->base_lo
, vr
->base_hi
);
397 rdmsr (MTRRphysMask_MSR (index
), vr
->mask_lo
, vr
->mask_hi
);
398 } /* End Function get_mtrr_var_range */
401 /* Set the MSR pair relating to a var range. Returns TRUE if
403 __initfunc(static int set_mtrr_var_range_testing (unsigned int index
,
404 struct mtrr_var_range
*vr
))
409 rdmsr(MTRRphysBase_MSR(index
), lo
, hi
);
411 if ((vr
->base_lo
& 0xfffff0ffUL
) != (lo
& 0xfffff0ffUL
)
412 || (vr
->base_hi
& 0xfUL
) != (hi
& 0xfUL
)) {
413 wrmsr(MTRRphysBase_MSR(index
), vr
->base_lo
, vr
->base_hi
);
417 rdmsr(MTRRphysMask_MSR(index
), lo
, hi
);
419 if ((vr
->mask_lo
& 0xfffff800UL
) != (lo
& 0xfffff800UL
)
420 || (vr
->mask_hi
& 0xfUL
) != (hi
& 0xfUL
)) {
421 wrmsr(MTRRphysMask_MSR(index
), vr
->mask_lo
, vr
->mask_hi
);
429 __initfunc(static void get_fixed_ranges(mtrr_type
*frs
))
431 unsigned long *p
= (unsigned long *)frs
;
434 rdmsr(MTRRfix64K_00000_MSR
, p
[0], p
[1]);
436 for (i
= 0; i
< 2; i
++)
437 rdmsr(MTRRfix16K_80000_MSR
+ i
, p
[2 + i
*2], p
[3 + i
*2]);
439 for (i
= 0; i
< 8; i
++)
440 rdmsr(MTRRfix4K_C0000_MSR
+ i
, p
[6 + i
*2], p
[7 + i
*2]);
444 __initfunc(static int set_fixed_ranges_testing(mtrr_type
*frs
))
446 unsigned long *p
= (unsigned long *)frs
;
449 unsigned long lo
, hi
;
451 rdmsr(MTRRfix64K_00000_MSR
, lo
, hi
);
452 if (p
[0] != lo
|| p
[1] != hi
) {
453 wrmsr(MTRRfix64K_00000_MSR
, p
[0], p
[1]);
457 for (i
= 0; i
< 2; i
++) {
458 rdmsr(MTRRfix16K_80000_MSR
+ i
, lo
, hi
);
459 if (p
[2 + i
*2] != lo
|| p
[3 + i
*2] != hi
) {
460 wrmsr(MTRRfix16K_80000_MSR
+ i
, p
[2 + i
*2], p
[3 + i
*2]);
465 for (i
= 0; i
< 8; i
++) {
466 rdmsr(MTRRfix4K_C0000_MSR
+ i
, lo
, hi
);
467 if (p
[6 + i
*2] != lo
|| p
[7 + i
*2] != hi
) {
468 wrmsr(MTRRfix4K_C0000_MSR
+ i
, p
[6 + i
*2], p
[7 + i
*2]);
479 unsigned int num_var_ranges
;
480 struct mtrr_var_range
*var_ranges
;
481 mtrr_type fixed_ranges
[NUM_FIXED_RANGES
];
482 unsigned char enabled
;
487 /* Grab all of the MTRR state for this CPU into *state. */
488 __initfunc(static void get_mtrr_state(struct mtrr_state
*state
))
490 unsigned int nvrs
, i
;
491 struct mtrr_var_range
*vrs
;
492 unsigned long lo
, dummy
;
494 nvrs
= state
->num_var_ranges
= get_num_var_ranges();
495 vrs
= state
->var_ranges
496 = kmalloc(nvrs
* sizeof(struct mtrr_var_range
), GFP_KERNEL
);
498 nvrs
= state
->num_var_ranges
= 0;
500 for (i
= 0; i
< nvrs
; i
++)
501 get_mtrr_var_range(i
, &vrs
[i
]);
503 get_fixed_ranges(state
->fixed_ranges
);
505 rdmsr(MTRRdefType_MSR
, lo
, dummy
);
506 state
->def_type
= (lo
& 0xff);
507 state
->enabled
= (lo
& 0xc00) >> 10;
508 } /* End Function get_mtrr_state */
511 /* Free resources associated with a struct mtrr_state */
512 __initfunc(static void finalize_mtrr_state(struct mtrr_state
*state
))
514 if (state
->var_ranges
) kfree (state
->var_ranges
);
515 } /* End Function finalize_mtrr_state */
518 __initfunc(static unsigned long set_mtrr_state (struct mtrr_state
*state
,
519 struct set_mtrr_context
*ctxt
))
520 /* [SUMMARY] Set the MTRR state for this CPU.
521 <state> The MTRR state information to read.
522 <ctxt> Some relevant CPU context.
523 [NOTE] The CPU must already be in a safe state for MTRR changes.
524 [RETURNS] 0 if no changes made, else a mask indication what was changed.
528 unsigned long change_mask
= 0;
530 for (i
= 0; i
< state
->num_var_ranges
; i
++)
531 if (set_mtrr_var_range_testing(i
, &state
->var_ranges
[i
]))
532 change_mask
|= MTRR_CHANGE_MASK_VARIABLE
;
534 if (set_fixed_ranges_testing(state
->fixed_ranges
))
535 change_mask
|= MTRR_CHANGE_MASK_FIXED
;
537 /* set_mtrr_restore restores the old value of MTRRdefType,
538 so to set it we fiddle with the saved value. */
539 if ((ctxt
->deftype_lo
& 0xff) != state
->def_type
540 || ((ctxt
->deftype_lo
& 0xc00) >> 10) != state
->enabled
)
542 ctxt
->deftype_lo
|= (state
->def_type
| state
->enabled
<< 10);
543 change_mask
|= MTRR_CHANGE_MASK_DEFTYPE
;
547 } /* End Function set_mtrr_state */
550 static atomic_t undone_count
;
551 static void (*handler_func
) (struct set_mtrr_context
*ctxt
, void *info
);
552 static void *handler_info
;
553 static volatile int wait_barrier_execute
= FALSE
;
554 static volatile int wait_barrier_cache_enable
= FALSE
;
556 static void sync_handler (void)
557 /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
561 struct set_mtrr_context ctxt
;
563 set_mtrr_prepare (&ctxt
);
564 /* Notify master CPU that I'm at the barrier and then wait */
565 atomic_dec (&undone_count
);
566 while (wait_barrier_execute
) barrier ();
567 /* The master has cleared me to execute */
568 (*handler_func
) (&ctxt
, handler_info
);
569 /* Notify master CPU that I've executed the function */
570 atomic_dec (&undone_count
);
571 /* Wait for master to clear me to enable cache and return */
572 while (wait_barrier_cache_enable
) barrier ();
573 set_mtrr_done (&ctxt
);
574 } /* End Function sync_handler */
576 static void do_all_cpus (void (*handler
) (struct set_mtrr_context
*ctxt
,
578 void *info
, int local
)
579 /* [SUMMARY] Execute a function on all CPUs, with caches flushed and disabled.
580 [PURPOSE] This function will synchronise all CPUs, flush and disable caches
581 on all CPUs, then call a specified function. When the specified function
582 finishes on all CPUs, caches are enabled on all CPUs.
583 <handler> The function to execute.
584 <info> An arbitrary information pointer which is passed to <<handler>>.
585 <local> If TRUE <<handler>> is executed locally.
589 unsigned long timeout
;
590 struct set_mtrr_context ctxt
;
592 mtrr_hook
= sync_handler
;
593 handler_func
= handler
;
595 wait_barrier_execute
= TRUE
;
596 wait_barrier_cache_enable
= TRUE
;
597 /* Send a message to all other CPUs and wait for them to enter the
599 atomic_set (&undone_count
, smp_num_cpus
- 1);
600 smp_message_pass (MSG_ALL_BUT_SELF
, MSG_MTRR_CHANGE
, 0, 0);
601 /* Wait for it to be done */
602 timeout
= jiffies
+ JIFFIE_TIMEOUT
;
603 while ( (atomic_read (&undone_count
) > 0) && (jiffies
< timeout
) )
605 if (atomic_read (&undone_count
) > 0)
607 panic ("mtrr: timed out waiting for other CPUs\n");
610 /* All other CPUs should be waiting for the barrier, with their caches
611 already flushed and disabled. Prepare for function completion
613 atomic_set (&undone_count
, smp_num_cpus
- 1);
614 /* Flush and disable the local CPU's cache and release the barier, which
615 should cause the other CPUs to execute the function. Also execute it
616 locally if required */
617 set_mtrr_prepare (&ctxt
);
618 wait_barrier_execute
= FALSE
;
619 if (local
) (*handler
) (&ctxt
, info
);
620 /* Now wait for other CPUs to complete the function */
621 while (atomic_read (&undone_count
) > 0) barrier ();
622 /* Now all CPUs should have finished the function. Release the barrier to
623 allow them to re-enable their caches and return from their interrupt,
624 then enable the local cache and return */
625 wait_barrier_cache_enable
= FALSE
;
626 set_mtrr_done (&ctxt
);
629 } /* End Function do_all_cpus */
634 unsigned long smp_base
;
635 unsigned long smp_size
;
636 unsigned int smp_reg
;
640 static void set_mtrr_handler (struct set_mtrr_context
*ctxt
, void *info
)
642 struct set_mtrr_data
*data
= info
;
644 set_mtrr_up (data
->smp_reg
, data
->smp_base
, data
->smp_size
, data
->smp_type
,
646 } /* End Function set_mtrr_handler */
648 static void set_mtrr_smp (unsigned int reg
, unsigned long base
,
649 unsigned long size
, mtrr_type type
)
651 struct set_mtrr_data data
;
654 data
.smp_base
= base
;
655 data
.smp_size
= size
;
656 data
.smp_type
= type
;
657 do_all_cpus (set_mtrr_handler
, &data
, TRUE
);
658 } /* End Function set_mtrr_smp */
661 /* A warning that is common to the module and non-module cases. */
662 /* Some BIOS's are fucked and don't set all MTRRs the same! */
664 static void mtrr_state_warn (unsigned long mask
)
666 __initfunc(static void mtrr_state_warn (unsigned long mask
))
670 if (mask
& MTRR_CHANGE_MASK_FIXED
)
671 printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
672 if (mask
& MTRR_CHANGE_MASK_VARIABLE
)
673 printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
674 if (mask
& MTRR_CHANGE_MASK_DEFTYPE
)
675 printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
676 printk ("mtrr: probably your BIOS does not setup all CPUs\n");
677 } /* End Function mtrr_state_warn */
680 /* As a module, copy the MTRR state using an IPI handler. */
682 static volatile unsigned long smp_changes_mask
= 0;
684 static void copy_mtrr_state_handler (struct set_mtrr_context
*ctxt
, void *info
)
686 unsigned long mask
, count
;
687 struct mtrr_state
*smp_mtrr_state
= info
;
689 mask
= set_mtrr_state (smp_mtrr_state
, ctxt
);
690 /* Use the atomic bitops to update the global mask */
691 for (count
= 0; count
< sizeof mask
* 8; ++count
)
693 if (mask
& 0x01) set_bit (count
, &smp_changes_mask
);
696 } /* End Function copy_mtrr_state_handler */
698 /* Copies the entire MTRR state of this CPU to all the others. */
699 static void copy_mtrr_state (void)
701 struct mtrr_state ms
;
703 get_mtrr_state (&ms
);
704 do_all_cpus (copy_mtrr_state_handler
, &ms
, FALSE
);
705 finalize_mtrr_state (&ms
);
706 mtrr_state_warn (smp_changes_mask
);
707 } /* End Function copy_mtrr_state */
712 static char *attrib_to_str (int x
)
714 return (x
<= 6) ? mtrr_strings
[x
] : "?";
715 } /* End Function attrib_to_str */
717 static void init_table (void)
721 max
= get_num_var_ranges ();
722 if ( ( usage_table
= kmalloc (max
* sizeof *usage_table
, GFP_KERNEL
) )
725 printk ("mtrr: could not allocate\n");
728 for (i
= 0; i
< max
; i
++) usage_table
[i
] = 1;
729 #ifdef CONFIG_PROC_FS
730 if ( ( ascii_buffer
= kmalloc (max
* LINE_SIZE
, GFP_KERNEL
) ) == NULL
)
732 printk ("mtrr: could not allocate\n");
738 } /* End Function init_table */
740 int mtrr_add (unsigned long base
, unsigned long size
, unsigned int type
,
742 /* [SUMMARY] Add an MTRR entry.
743 <base> The starting (base) address of the region.
744 <size> The size (in bytes) of the region.
745 <type> The type of the new region.
746 <increment> If true and the region already exists, the usage count will be
748 [RETURNS] The MTRR register on success, else a negative number indicating
750 [NOTE] This routine uses a spinlock.
755 unsigned long lbase
, lsize
, last
;
757 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return -ENODEV
;
758 if ( (base
& 0xfff) || (size
& 0xfff) )
760 printk ("mtrr: size and base must be multiples of 4kB\n");
761 printk ("mtrr: size: %lx base: %lx\n", size
, base
);
764 if (base
+ size
< 0x100000)
766 printk ("mtrr: cannot set region below 1 MByte (0x%lx,0x%lx)\n",
770 /* Check upper bits of base and last are equal and lower bits are 0 for
771 base and 1 for last */
772 last
= base
+ size
- 1;
773 for (lbase
= base
; !(lbase
& 1) && (last
& 1);
774 lbase
= lbase
>> 1, last
= last
>> 1);
777 printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
781 if (type
>= MTRR_NUM_TYPES
)
783 printk ("mtrr: type: %u illegal\n", type
);
786 /* If the type is WC, check that this processor supports it */
787 if ( (type
== MTRR_TYPE_WRCOMB
) && !have_wrcomb () )
789 printk ("mtrr: your processor doesn't support write-combining\n");
792 increment
= increment
? 1 : 0;
793 max
= get_num_var_ranges ();
794 /* Search for existing MTRR */
795 spin_lock (&main_lock
);
796 for (i
= 0; i
< max
; ++i
)
798 get_mtrr (i
, &lbase
, &lsize
, <ype
);
799 if (base
>= lbase
+ lsize
) continue;
800 if ( (base
< lbase
) && (base
+ size
<= lbase
) ) continue;
801 /* At this point we know there is some kind of overlap/enclosure */
802 if ( (base
< lbase
) || (base
+ size
> lbase
+ lsize
) )
804 spin_unlock (&main_lock
);
805 printk ("mtrr: 0x%lx,0x%lx overlaps existing 0x%lx,0x%lx\n",
806 base
, size
, lbase
, lsize
);
809 /* New region is enclosed by an existing region */
812 if (type
== MTRR_TYPE_UNCACHABLE
) continue;
813 spin_unlock (&main_lock
);
814 printk ( "mtrr: type mismatch for %lx,%lx old: %s new: %s\n",
815 base
, size
, attrib_to_str (ltype
), attrib_to_str (type
) );
818 if (increment
) ++usage_table
[i
];
820 spin_unlock (&main_lock
);
823 /* Search for an empty MTRR */
824 for (i
= 0; i
< max
; ++i
)
826 get_mtrr (i
, &lbase
, &lsize
, <ype
);
827 if (lsize
> 0) continue;
828 set_mtrr (i
, base
, size
, type
);
831 spin_unlock (&main_lock
);
834 spin_unlock (&main_lock
);
835 printk ("mtrr: no more MTRRs available\n");
837 } /* End Function mtrr_add */
839 int mtrr_del (int reg
, unsigned long base
, unsigned long size
)
840 /* [SUMMARY] Delete MTRR/decrement usage count.
841 <reg> The register. If this is less than 0 then <<base>> and <<size>> must
843 <base> The base address of the region. This is ignored if <<reg>> is >= 0.
844 <size> The size of the region. This is ignored if <<reg>> is >= 0.
845 [RETURNS] The register on success, else a negative number indicating
847 [NOTE] This routine uses a spinlock.
852 unsigned long lbase
, lsize
;
854 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return -ENODEV
;
855 max
= get_num_var_ranges ();
856 spin_lock (&main_lock
);
859 /* Search for existing MTRR */
860 for (i
= 0; i
< max
; ++i
)
862 get_mtrr (i
, &lbase
, &lsize
, <ype
);
863 if ( (lbase
== base
) && (lsize
== size
) )
871 spin_unlock (&main_lock
);
872 printk ("mtrr: no MTRR for %lx,%lx found\n", base
, size
);
878 spin_unlock (&main_lock
);
879 printk ("mtrr: register: %d too big\n", reg
);
882 get_mtrr (reg
, &lbase
, &lsize
, <ype
);
885 spin_unlock (&main_lock
);
886 printk ("mtrr: MTRR %d not used\n", reg
);
889 if (usage_table
[reg
] < 1)
891 spin_unlock (&main_lock
);
892 printk ("mtrr: reg: %d has count=0\n", reg
);
895 if (--usage_table
[reg
] < 1) set_mtrr (reg
, 0, 0, 0);
897 spin_unlock (&main_lock
);
899 } /* End Function mtrr_del */
901 #ifdef CONFIG_PROC_FS
903 static int mtrr_file_add (unsigned long base
, unsigned long size
,
904 unsigned int type
, char increment
, struct file
*file
)
907 unsigned int *fcount
= file
->private_data
;
909 max
= get_num_var_ranges ();
912 if ( ( fcount
= kmalloc (max
* sizeof *fcount
, GFP_KERNEL
) ) == NULL
)
914 printk ("mtrr: could not allocate\n");
917 memset (fcount
, 0, max
* sizeof *fcount
);
918 file
->private_data
= fcount
;
920 reg
= mtrr_add (base
, size
, type
, 1);
921 if (reg
>= 0) ++fcount
[reg
];
923 } /* End Function mtrr_file_add */
925 static int mtrr_file_del (unsigned long base
, unsigned long size
,
929 unsigned int *fcount
= file
->private_data
;
931 reg
= mtrr_del (-1, base
, size
);
932 if (reg
< 0) return reg
;
933 if (fcount
!= NULL
) --fcount
[reg
];
935 } /* End Function mtrr_file_del */
937 static ssize_t
mtrr_read (struct file
*file
, char *buf
, size_t len
,
940 if (*ppos
>= ascii_buf_bytes
) return 0;
941 if (*ppos
+ len
> ascii_buf_bytes
) len
= ascii_buf_bytes
- *ppos
;
942 if ( copy_to_user (buf
, ascii_buffer
+ *ppos
, len
) ) return -EFAULT
;
945 } /* End Function mtrr_read */
947 static ssize_t
mtrr_write (struct file
*file
, const char *buf
, size_t len
,
949 /* Format of control line:
950 "base=%lx size=%lx type=%s" OR:
955 unsigned long reg
, base
, size
;
957 char line
[LINE_SIZE
];
959 if ( !suser () ) return -EPERM
;
960 /* Can't seek (pwrite) on this device */
961 if (ppos
!= &file
->f_pos
) return -ESPIPE
;
962 memset (line
, 0, LINE_SIZE
);
963 if (len
> LINE_SIZE
) len
= LINE_SIZE
;
964 if ( copy_from_user (line
, buf
, len
- 1) ) return -EFAULT
;
965 ptr
= line
+ strlen (line
) - 1;
966 if (*ptr
== '\n') *ptr
= '\0';
967 if ( !strncmp (line
, "disable=", 8) )
969 reg
= simple_strtoul (line
+ 8, &ptr
, 0);
970 err
= mtrr_del (reg
, 0, 0);
971 if (err
< 0) return err
;
974 if ( strncmp (line
, "base=", 5) )
976 printk ("mtrr: no \"base=\" in line: \"%s\"\n", line
);
979 base
= simple_strtoul (line
+ 5, &ptr
, 0);
980 for (; isspace (*ptr
); ++ptr
);
981 if ( strncmp (ptr
, "size=", 5) )
983 printk ("mtrr: no \"size=\" in line: \"%s\"\n", line
);
986 size
= simple_strtoul (ptr
+ 5, &ptr
, 0);
987 for (; isspace (*ptr
); ++ptr
);
988 if ( strncmp (ptr
, "type=", 5) )
990 printk ("mtrr: no \"type=\" in line: \"%s\"\n", line
);
994 for (; isspace (*ptr
); ++ptr
);
995 for (i
= 0; i
< MTRR_NUM_TYPES
; ++i
)
997 if ( strcmp (ptr
, mtrr_strings
[i
]) ) continue;
998 err
= mtrr_add (base
, size
, i
, 1);
999 if (err
< 0) return err
;
1002 printk ("mtrr: illegal type: \"%s\"\n", ptr
);
1004 } /* End Function mtrr_write */
1006 static int mtrr_ioctl (struct inode
*inode
, struct file
*file
,
1007 unsigned int cmd
, unsigned long arg
)
1011 struct mtrr_sentry sentry
;
1012 struct mtrr_gentry gentry
;
1017 return -ENOIOCTLCMD
;
1018 case MTRRIOC_ADD_ENTRY
:
1019 if ( !suser () ) return -EPERM
;
1020 if ( copy_from_user (&sentry
, (void *) arg
, sizeof sentry
) )
1022 err
= mtrr_file_add (sentry
.base
, sentry
.size
, sentry
.type
, 1, file
);
1023 if (err
< 0) return err
;
1025 case MTRRIOC_SET_ENTRY
:
1026 if ( !suser () ) return -EPERM
;
1027 if ( copy_from_user (&sentry
, (void *) arg
, sizeof sentry
) )
1029 err
= mtrr_add (sentry
.base
, sentry
.size
, sentry
.type
, 0);
1030 if (err
< 0) return err
;
1032 case MTRRIOC_DEL_ENTRY
:
1033 if ( !suser () ) return -EPERM
;
1034 if ( copy_from_user (&sentry
, (void *) arg
, sizeof sentry
) )
1036 err
= mtrr_file_del (sentry
.base
, sentry
.size
, file
);
1037 if (err
< 0) return err
;
1039 case MTRRIOC_GET_ENTRY
:
1040 if ( copy_from_user (&gentry
, (void *) arg
, sizeof gentry
) )
1042 if ( gentry
.regnum
>= get_num_var_ranges () ) return -EINVAL
;
1043 get_mtrr (gentry
.regnum
, &gentry
.base
, &gentry
.size
, &type
);
1045 if ( copy_to_user ( (void *) arg
, &gentry
, sizeof gentry
) )
1050 } /* End Function mtrr_ioctl */
1052 static int mtrr_open (struct inode
*ino
, struct file
*filep
)
1056 } /* End Function mtrr_open */
1058 static int mtrr_close (struct inode
*ino
, struct file
*file
)
1061 unsigned int *fcount
= file
->private_data
;
1064 if (fcount
== NULL
) return 0;
1065 max
= get_num_var_ranges ();
1066 for (i
= 0; i
< max
; ++i
)
1068 while (fcount
[i
] > 0)
1070 if (mtrr_del (i
, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i
);
1075 file
->private_data
= NULL
;
1077 } /* End Function mtrr_close */
1079 static struct file_operations mtrr_fops
=
1082 mtrr_read
, /* Read */
1083 mtrr_write
, /* Write */
1086 mtrr_ioctl
, /* IOctl */
1088 mtrr_open
, /* Open */
1090 mtrr_close
, /* Release */
1093 NULL
, /* CheckMediaChange */
1094 NULL
, /* Revalidate */
1098 static struct inode_operations proc_mtrr_inode_operations
= {
1099 &mtrr_fops
, /* default property file-ops */
1109 NULL
, /* readlink */
1110 NULL
, /* follow_link */
1111 NULL
, /* readpage */
1112 NULL
, /* writepage */
1114 NULL
, /* truncate */
1115 NULL
/* permission */
1118 static struct proc_dir_entry proc_root_mtrr
= {
1119 PROC_MTRR
, 4, "mtrr",
1120 S_IFREG
| S_IWUSR
| S_IRUGO
, 1, 0, 0,
1121 0, &proc_mtrr_inode_operations
1124 static void compute_ascii (void)
1129 unsigned long base
, size
;
1131 ascii_buf_bytes
= 0;
1132 max
= get_num_var_ranges ();
1133 for (i
= 0; i
< max
; i
++)
1135 get_mtrr (i
, &base
, &size
, &type
);
1136 if (size
< 1) usage_table
[i
] = 0;
1139 if (size
< 0x100000)
1151 (ascii_buffer
+ ascii_buf_bytes
,
1152 "reg%02i: base=0x%08lx (%4liMB), size=%4li%cB: %s, count=%d\n",
1153 i
, base
, base
>>20, size
, factor
,
1154 attrib_to_str (type
), usage_table
[i
]);
1155 ascii_buf_bytes
+= strlen (ascii_buffer
+ ascii_buf_bytes
);
1158 proc_root_mtrr
.size
= ascii_buf_bytes
;
1159 } /* End Function compute_ascii */
1161 #endif /* CONFIG_PROC_FS */
1163 EXPORT_SYMBOL(mtrr_add
);
1164 EXPORT_SYMBOL(mtrr_del
);
1166 #if defined(__SMP__) && !defined(MODULE)
1168 static volatile unsigned long smp_changes_mask __initdata
= 0;
1169 static struct mtrr_state smp_mtrr_state __initdata
= {0, 0};
1171 __initfunc(void mtrr_init_boot_cpu (void))
1173 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return;
1174 printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION
);
1176 get_mtrr_state (&smp_mtrr_state
);
1177 } /* End Function mtrr_init_boot_cpu */
1179 __initfunc(void mtrr_init_secondary_cpu (void))
1181 unsigned long mask
, count
;
1182 struct set_mtrr_context ctxt
;
1184 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return;
1185 /* Note that this is not ideal, since the cache is only flushed/disabled
1186 for this CPU while the MTRRs are changed, but changing this requires
1187 more invasive changes to the way the kernel boots */
1188 set_mtrr_prepare (&ctxt
);
1189 mask
= set_mtrr_state (&smp_mtrr_state
, &ctxt
);
1190 set_mtrr_done (&ctxt
);
1191 /* Use the atomic bitops to update the global mask */
1192 for (count
= 0; count
< sizeof mask
* 8; ++count
)
1194 if (mask
& 0x01) set_bit (count
, &smp_changes_mask
);
1197 } /* End Function mtrr_init_secondary_cpu */
1202 int init_module (void)
1204 __initfunc(int mtrr_init(void))
1207 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return 0;
1208 # if !defined(__SMP__) || defined(MODULE)
1209 printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION
);
1216 finalize_mtrr_state (&smp_mtrr_state
);
1217 mtrr_state_warn (smp_changes_mask
);
1218 # endif /* MODULE */
1219 # endif /* __SMP__ */
1221 # ifdef CONFIG_PROC_FS
1222 proc_register (&proc_root
, &proc_root_mtrr
);
1230 void cleanup_module (void)
1232 if ( !(boot_cpu_data
.x86_capability
& X86_FEATURE_MTRR
) ) return;
1233 # ifdef CONFIG_PROC_FS
1234 proc_unregister (&proc_root
, PROC_MTRR
);