/*
 * Imported from Linux 2.1.118 (davej-history.git): arch/i386/kernel/mtrr.c
 * git blob f56bcdfa15047b9dc62fae5924677a8abc186a8d
 */
1 /* Generic MTRR (Memory Type Range Register) driver.
3 Copyright (C) 1997-1998 Richard Gooch
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
23 Source: "Pentium Pro Family Developer's Manual, Volume 3:
24 Operating System Writer's Guide" (Intel document number 242692),
25 section 11.11.7
27 ChangeLog
29 Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
30 Initial register-setting code (from proform-1.0).
31 19971216 Richard Gooch <rgooch@atnf.csiro.au>
32 Original version for /proc/mtrr interface, SMP-safe.
33 v1.0
34 19971217 Richard Gooch <rgooch@atnf.csiro.au>
35 Bug fix for ioctls()'s.
36 Added sample code in Documentation/mtrr.txt
37 v1.1
38 19971218 Richard Gooch <rgooch@atnf.csiro.au>
39 Disallow overlapping regions.
40 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
41 Register-setting fixups.
42 v1.2
43 19971222 Richard Gooch <rgooch@atnf.csiro.au>
44 Fixups for kernel 2.1.75.
45 v1.3
46 19971229 David Wragg <dpw@doc.ic.ac.uk>
47 Register-setting fixups and conformity with Intel conventions.
48 19971229 Richard Gooch <rgooch@atnf.csiro.au>
49 Cosmetic changes and wrote this ChangeLog ;-)
50 19980106 Richard Gooch <rgooch@atnf.csiro.au>
51 Fixups for kernel 2.1.78.
52 v1.4
53 19980119 David Wragg <dpw@doc.ic.ac.uk>
54 Included passive-release enable code (elsewhere in PCI setup).
55 v1.5
56 19980131 Richard Gooch <rgooch@atnf.csiro.au>
57 Replaced global kernel lock with private spinlock.
58 v1.6
59 19980201 Richard Gooch <rgooch@atnf.csiro.au>
60 Added wait for other CPUs to complete changes.
61 v1.7
62 19980202 Richard Gooch <rgooch@atnf.csiro.au>
63 Bug fix in definition of <set_mtrr> for UP.
64 v1.8
65 19980319 Richard Gooch <rgooch@atnf.csiro.au>
66 Fixups for kernel 2.1.90.
67 19980323 Richard Gooch <rgooch@atnf.csiro.au>
68 Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
69 v1.9
70 19980325 Richard Gooch <rgooch@atnf.csiro.au>
71 Fixed test for overlapping regions: confused by adjacent regions
72 19980326 Richard Gooch <rgooch@atnf.csiro.au>
73 Added wbinvd in <set_mtrr_prepare>.
74 19980401 Richard Gooch <rgooch@atnf.csiro.au>
75 Bug fix for non-SMP compilation.
76 19980418 David Wragg <dpw@doc.ic.ac.uk>
77 Fixed-MTRR synchronisation for SMP and use atomic operations
78 instead of spinlocks.
79 19980418 Richard Gooch <rgooch@atnf.csiro.au>
80 Differentiate different MTRR register classes for BIOS fixup.
81 v1.10
82 19980419 David Wragg <dpw@doc.ic.ac.uk>
83 Bug fix in variable MTRR synchronisation.
84 v1.11
85 19980419 Richard Gooch <rgooch@atnf.csiro.au>
86 Fixups for kernel 2.1.97.
87 v1.12
88 19980421 Richard Gooch <rgooch@atnf.csiro.au>
89 Safer synchronisation across CPUs when changing MTRRs.
90 v1.13
91 19980423 Richard Gooch <rgooch@atnf.csiro.au>
92 Bugfix for SMP systems without MTRR support.
93 v1.14
94 19980427 Richard Gooch <rgooch@atnf.csiro.au>
95 Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
96 v1.15
97 19980427 Richard Gooch <rgooch@atnf.csiro.au>
98 Use atomic bitops for setting SMP change mask.
99 v1.16
100 19980428 Richard Gooch <rgooch@atnf.csiro.au>
101 Removed spurious diagnostic message.
102 v1.17
103 19980429 Richard Gooch <rgooch@atnf.csiro.au>
104 Moved register-setting macros into this file.
105 Moved setup code from init/main.c to i386-specific areas.
106 v1.18
107 19980502 Richard Gooch <rgooch@atnf.csiro.au>
108 Moved MTRR detection outside conditionals in <mtrr_init>.
109 v1.19
110 19980502 Richard Gooch <rgooch@atnf.csiro.au>
111 Documentation improvement: mention Pentium II and AGP.
112 v1.20
113 19980521 Richard Gooch <rgooch@atnf.csiro.au>
114 Only manipulate interrupt enable flag on local CPU.
115 Allow enclosed uncachable regions.
116 v1.21
117 19980611 Richard Gooch <rgooch@atnf.csiro.au>
118 Always define <main_lock>.
119 v1.22
121 #include <linux/types.h>
122 #include <linux/errno.h>
123 #include <linux/sched.h>
124 #include <linux/tty.h>
125 #include <linux/timer.h>
126 #include <linux/config.h>
127 #include <linux/kernel.h>
128 #include <linux/wait.h>
129 #include <linux/string.h>
130 #include <linux/malloc.h>
131 #include <linux/ioport.h>
132 #include <linux/delay.h>
133 #include <linux/fs.h>
134 #include <linux/ctype.h>
135 #include <linux/proc_fs.h>
136 #include <linux/mm.h>
137 #include <linux/module.h>
138 #define MTRR_NEED_STRINGS
139 #include <asm/mtrr.h>
140 #include <linux/init.h>
141 #include <linux/smp.h>
143 #include <asm/uaccess.h>
144 #include <asm/io.h>
145 #include <asm/processor.h>
146 #include <asm/system.h>
147 #include <asm/pgtable.h>
148 #include <asm/segment.h>
149 #include <asm/bitops.h>
150 #include <asm/atomic.h>
152 #define MTRR_VERSION "1.22 (19980611)"
154 #define TRUE 1
155 #define FALSE 0
157 #define MTRRcap_MSR 0x0fe
158 #define MTRRdefType_MSR 0x2ff
160 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
161 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
163 #define NUM_FIXED_RANGES 88
164 #define MTRRfix64K_00000_MSR 0x250
165 #define MTRRfix16K_80000_MSR 0x258
166 #define MTRRfix16K_A0000_MSR 0x259
167 #define MTRRfix4K_C0000_MSR 0x268
168 #define MTRRfix4K_C8000_MSR 0x269
169 #define MTRRfix4K_D0000_MSR 0x26a
170 #define MTRRfix4K_D8000_MSR 0x26b
171 #define MTRRfix4K_E0000_MSR 0x26c
172 #define MTRRfix4K_E8000_MSR 0x26d
173 #define MTRRfix4K_F0000_MSR 0x26e
174 #define MTRRfix4K_F8000_MSR 0x26f
176 #ifdef __SMP__
177 # define MTRR_CHANGE_MASK_FIXED 0x01
178 # define MTRR_CHANGE_MASK_VARIABLE 0x02
179 # define MTRR_CHANGE_MASK_DEFTYPE 0x04
180 #endif
182 /* In the processor's MTRR interface, the MTRR type is always held in
183 an 8 bit field: */
184 typedef u8 mtrr_type;
186 #define LINE_SIZE 80
187 #define JIFFIE_TIMEOUT 100
189 #ifdef __SMP__
190 # define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
191 #else
192 # define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type,TRUE)
193 #endif
195 #ifndef CONFIG_PROC_FS
196 # define compute_ascii() while (0)
197 #endif
199 #ifdef CONFIG_PROC_FS
200 static char *ascii_buffer = NULL;
201 static unsigned int ascii_buf_bytes = 0;
202 #endif
203 static unsigned int *usage_table = NULL;
204 static spinlock_t main_lock = SPIN_LOCK_UNLOCKED;
206 /* Private functions */
207 #ifdef CONFIG_PROC_FS
208 static void compute_ascii (void);
209 #endif
/* Processor state saved by set_mtrr_prepare() and later restored by
   set_mtrr_done() around an MTRR update. */
struct set_mtrr_context
{
    unsigned long flags;       /* saved EFLAGS (local interrupt state)    */
    unsigned long deftype_lo;  /* saved low half of the MTRRdefType MSR   */
    unsigned long deftype_hi;  /* saved high half of the MTRRdefType MSR  */
    unsigned long cr4val;      /* saved CR4 (Page Global Enable state)    */
};
221 * Access to machine-specific registers (available on 586 and better only)
222 * Note: the rd* operations modify the parameters directly (without using
223 * pointer indirection), this allows gcc to optimize better
225 #define rdmsr(msr,val1,val2) \
226 __asm__ __volatile__("rdmsr" \
227 : "=a" (val1), "=d" (val2) \
228 : "c" (msr))
230 #define wrmsr(msr,val1,val2) \
231 __asm__ __volatile__("wrmsr" \
232 : /* no outputs */ \
233 : "c" (msr), "a" (val1), "d" (val2))
235 #define rdtsc(low,high) \
236 __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
238 #define rdpmc(counter,low,high) \
239 __asm__ __volatile__("rdpmc" \
240 : "=a" (low), "=d" (high) \
241 : "c" (counter))
244 /* Put the processor into a state where MTRRs can be safely set. */
245 static void set_mtrr_prepare(struct set_mtrr_context *ctxt)
247 unsigned long tmp;
249 /* disable interrupts locally */
250 __save_flags (ctxt->flags); __cli ();
252 /* save value of CR4 and clear Page Global Enable (bit 7) */
253 asm volatile ("movl %%cr4, %0\n\t"
254 "movl %0, %1\n\t"
255 "andb $0x7f, %b1\n\t"
256 "movl %1, %%cr4\n\t"
257 : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");
259 /* disable and flush caches. Note that wbinvd flushes the TLBs as
260 a side-effect. */
261 asm volatile ("movl %%cr0, %0\n\t"
262 "orl $0x40000000, %0\n\t"
263 "wbinvd\n\t"
264 "movl %0, %%cr0\n\t"
265 "wbinvd\n\t"
266 : "=r" (tmp) : : "memory");
268 /* disable MTRRs, and set the default type to uncached. */
269 rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
270 wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
271 } /* End Function set_mtrr_prepare */
274 /* Restore the processor after a set_mtrr_prepare */
275 static void set_mtrr_done(struct set_mtrr_context *ctxt)
277 unsigned long tmp;
279 /* flush caches and TLBs */
280 asm volatile ("wbinvd" : : : "memory" );
282 /* restore MTRRdefType */
283 wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
285 /* enable caches */
286 asm volatile ("movl %%cr0, %0\n\t"
287 "andl $0xbfffffff, %0\n\t"
288 "movl %0, %%cr0\n\t"
289 : "=r" (tmp) : : "memory");
291 /* restore value of CR4 */
292 asm volatile ("movl %0, %%cr4"
293 : : "r" (ctxt->cr4val) : "memory");
295 /* re-enable interrupts locally (if enabled previously) */
296 __restore_flags (ctxt->flags);
297 } /* End Function set_mtrr_done */
300 /* this function returns the number of variable MTRRs */
301 static unsigned int get_num_var_ranges (void)
303 unsigned long config, dummy;
305 rdmsr(MTRRcap_MSR, config, dummy);
306 return (config & 0xff);
307 } /* End Function get_num_var_ranges */
310 /* non-zero if we have the write-combining memory type. */
311 static int have_wrcomb (void)
313 unsigned long config, dummy;
315 rdmsr(MTRRcap_MSR, config, dummy);
316 return (config & (1<<10));
320 static void get_mtrr (unsigned int reg, unsigned long *base,
321 unsigned long *size, mtrr_type *type)
323 unsigned long dummy, mask_lo, base_lo;
325 rdmsr(MTRRphysMask_MSR(reg), mask_lo, dummy);
326 if ((mask_lo & 0x800) == 0) {
327 /* Invalid (i.e. free) range. */
328 *base = 0;
329 *size = 0;
330 *type = 0;
331 return;
334 rdmsr(MTRRphysBase_MSR(reg), base_lo, dummy);
336 /* We ignore the extra address bits (32-35). If someone wants to
337 run x86 Linux on a machine with >4GB memory, this will be the
338 least of their problems. */
340 /* Clean up mask_lo so it gives the real address mask. */
341 mask_lo = (mask_lo & 0xfffff000UL);
343 /* This works correctly if size is a power of two, i.e. a
344 contiguous range. */
345 *size = ~(mask_lo - 1);
347 *base = (base_lo & 0xfffff000UL);
348 *type = (base_lo & 0xff);
349 } /* End Function get_mtrr */
352 static void set_mtrr_up (unsigned int reg, unsigned long base,
353 unsigned long size, mtrr_type type, int do_safe)
354 /* [SUMMARY] Set variable MTRR register on the local CPU.
355 <reg> The register to set.
356 <base> The base address of the region.
357 <size> The size of the region. If this is 0 the region is disabled.
358 <type> The type of the region.
359 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
360 be done externally.
363 struct set_mtrr_context ctxt;
365 if (do_safe) set_mtrr_prepare (&ctxt);
366 if (size == 0)
368 /* The invalid bit is kept in the mask, so we simply clear the
369 relevant mask register to disable a range. */
370 wrmsr (MTRRphysMask_MSR (reg), 0, 0);
372 else
374 wrmsr (MTRRphysBase_MSR (reg), base | type, 0);
375 wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0);
377 if (do_safe) set_mtrr_done (&ctxt);
378 } /* End Function set_mtrr_up */
381 #ifdef __SMP__
/* Shadow copy of one variable-range base/mask MSR pair. */
struct mtrr_var_range
{
    unsigned long base_lo;   /* MTRRphysBase, low half  */
    unsigned long base_hi;   /* MTRRphysBase, high half */
    unsigned long mask_lo;   /* MTRRphysMask, low half  */
    unsigned long mask_hi;   /* MTRRphysMask, high half */
};
392 /* Get the MSR pair relating to a var range. */
393 __initfunc(static void get_mtrr_var_range (unsigned int index,
394 struct mtrr_var_range *vr))
396 rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
397 rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
398 } /* End Function get_mtrr_var_range */
401 /* Set the MSR pair relating to a var range. Returns TRUE if
402 changes are made. */
403 __initfunc(static int set_mtrr_var_range_testing (unsigned int index,
404 struct mtrr_var_range *vr))
406 unsigned int lo, hi;
407 int changed = FALSE;
409 rdmsr(MTRRphysBase_MSR(index), lo, hi);
411 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
412 || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
413 wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
414 changed = TRUE;
417 rdmsr(MTRRphysMask_MSR(index), lo, hi);
419 if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
420 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
421 wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
422 changed = TRUE;
425 return changed;
429 __initfunc(static void get_fixed_ranges(mtrr_type *frs))
431 unsigned long *p = (unsigned long *)frs;
432 int i;
434 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
436 for (i = 0; i < 2; i++)
437 rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
439 for (i = 0; i < 8; i++)
440 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
444 __initfunc(static int set_fixed_ranges_testing(mtrr_type *frs))
446 unsigned long *p = (unsigned long *)frs;
447 int changed = FALSE;
448 int i;
449 unsigned long lo, hi;
451 rdmsr(MTRRfix64K_00000_MSR, lo, hi);
452 if (p[0] != lo || p[1] != hi) {
453 wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
454 changed = TRUE;
457 for (i = 0; i < 2; i++) {
458 rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
459 if (p[2 + i*2] != lo || p[3 + i*2] != hi) {
460 wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
461 changed = TRUE;
465 for (i = 0; i < 8; i++) {
466 rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
467 if (p[6 + i*2] != lo || p[7 + i*2] != hi) {
468 wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
469 changed = TRUE;
473 return changed;
477 struct mtrr_state
479 unsigned int num_var_ranges;
480 struct mtrr_var_range *var_ranges;
481 mtrr_type fixed_ranges[NUM_FIXED_RANGES];
482 unsigned char enabled;
483 mtrr_type def_type;
487 /* Grab all of the MTRR state for this CPU into *state. */
488 __initfunc(static void get_mtrr_state(struct mtrr_state *state))
490 unsigned int nvrs, i;
491 struct mtrr_var_range *vrs;
492 unsigned long lo, dummy;
494 nvrs = state->num_var_ranges = get_num_var_ranges();
495 vrs = state->var_ranges
496 = kmalloc(nvrs * sizeof(struct mtrr_var_range), GFP_KERNEL);
497 if (vrs == NULL)
498 nvrs = state->num_var_ranges = 0;
500 for (i = 0; i < nvrs; i++)
501 get_mtrr_var_range(i, &vrs[i]);
503 get_fixed_ranges(state->fixed_ranges);
505 rdmsr(MTRRdefType_MSR, lo, dummy);
506 state->def_type = (lo & 0xff);
507 state->enabled = (lo & 0xc00) >> 10;
508 } /* End Function get_mtrr_state */
511 /* Free resources associated with a struct mtrr_state */
512 __initfunc(static void finalize_mtrr_state(struct mtrr_state *state))
514 if (state->var_ranges) kfree (state->var_ranges);
515 } /* End Function finalize_mtrr_state */
518 __initfunc(static unsigned long set_mtrr_state (struct mtrr_state *state,
519 struct set_mtrr_context *ctxt))
520 /* [SUMMARY] Set the MTRR state for this CPU.
521 <state> The MTRR state information to read.
522 <ctxt> Some relevant CPU context.
523 [NOTE] The CPU must already be in a safe state for MTRR changes.
524 [RETURNS] 0 if no changes made, else a mask indication what was changed.
527 unsigned int i;
528 unsigned long change_mask = 0;
530 for (i = 0; i < state->num_var_ranges; i++)
531 if (set_mtrr_var_range_testing(i, &state->var_ranges[i]))
532 change_mask |= MTRR_CHANGE_MASK_VARIABLE;
534 if (set_fixed_ranges_testing(state->fixed_ranges))
535 change_mask |= MTRR_CHANGE_MASK_FIXED;
537 /* set_mtrr_restore restores the old value of MTRRdefType,
538 so to set it we fiddle with the saved value. */
539 if ((ctxt->deftype_lo & 0xff) != state->def_type
540 || ((ctxt->deftype_lo & 0xc00) >> 10) != state->enabled)
542 ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
543 change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
546 return change_mask;
547 } /* End Function set_mtrr_state */
550 static atomic_t undone_count;
551 static void (*handler_func) (struct set_mtrr_context *ctxt, void *info);
552 static void *handler_info;
553 static volatile int wait_barrier_execute = FALSE;
554 static volatile int wait_barrier_cache_enable = FALSE;
556 static void sync_handler (void)
557 /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
558 [RETURNS] Nothing.
561 struct set_mtrr_context ctxt;
563 set_mtrr_prepare (&ctxt);
564 /* Notify master CPU that I'm at the barrier and then wait */
565 atomic_dec (&undone_count);
566 while (wait_barrier_execute) barrier ();
567 /* The master has cleared me to execute */
568 (*handler_func) (&ctxt, handler_info);
569 /* Notify master CPU that I've executed the function */
570 atomic_dec (&undone_count);
571 /* Wait for master to clear me to enable cache and return */
572 while (wait_barrier_cache_enable) barrier ();
573 set_mtrr_done (&ctxt);
574 } /* End Function sync_handler */
576 static void do_all_cpus (void (*handler) (struct set_mtrr_context *ctxt,
577 void *info),
578 void *info, int local)
579 /* [SUMMARY] Execute a function on all CPUs, with caches flushed and disabled.
580 [PURPOSE] This function will synchronise all CPUs, flush and disable caches
581 on all CPUs, then call a specified function. When the specified function
582 finishes on all CPUs, caches are enabled on all CPUs.
583 <handler> The function to execute.
584 <info> An arbitrary information pointer which is passed to <<handler>>.
585 <local> If TRUE <<handler>> is executed locally.
586 [RETURNS] Nothing.
589 unsigned long timeout;
590 struct set_mtrr_context ctxt;
592 mtrr_hook = sync_handler;
593 handler_func = handler;
594 handler_info = info;
595 wait_barrier_execute = TRUE;
596 wait_barrier_cache_enable = TRUE;
597 /* Send a message to all other CPUs and wait for them to enter the
598 barrier */
599 atomic_set (&undone_count, smp_num_cpus - 1);
600 smp_message_pass (MSG_ALL_BUT_SELF, MSG_MTRR_CHANGE, 0, 0);
601 /* Wait for it to be done */
602 timeout = jiffies + JIFFIE_TIMEOUT;
603 while ( (atomic_read (&undone_count) > 0) && (jiffies < timeout) )
604 barrier ();
605 if (atomic_read (&undone_count) > 0)
607 panic ("mtrr: timed out waiting for other CPUs\n");
609 mtrr_hook = NULL;
610 /* All other CPUs should be waiting for the barrier, with their caches
611 already flushed and disabled. Prepare for function completion
612 notification */
613 atomic_set (&undone_count, smp_num_cpus - 1);
614 /* Flush and disable the local CPU's cache and release the barier, which
615 should cause the other CPUs to execute the function. Also execute it
616 locally if required */
617 set_mtrr_prepare (&ctxt);
618 wait_barrier_execute = FALSE;
619 if (local) (*handler) (&ctxt, info);
620 /* Now wait for other CPUs to complete the function */
621 while (atomic_read (&undone_count) > 0) barrier ();
622 /* Now all CPUs should have finished the function. Release the barrier to
623 allow them to re-enable their caches and return from their interrupt,
624 then enable the local cache and return */
625 wait_barrier_cache_enable = FALSE;
626 set_mtrr_done (&ctxt);
627 handler_func = NULL;
628 handler_info = NULL;
629 } /* End Function do_all_cpus */
632 struct set_mtrr_data
634 unsigned long smp_base;
635 unsigned long smp_size;
636 unsigned int smp_reg;
637 mtrr_type smp_type;
640 static void set_mtrr_handler (struct set_mtrr_context *ctxt, void *info)
642 struct set_mtrr_data *data = info;
644 set_mtrr_up (data->smp_reg, data->smp_base, data->smp_size, data->smp_type,
645 FALSE);
646 } /* End Function set_mtrr_handler */
648 static void set_mtrr_smp (unsigned int reg, unsigned long base,
649 unsigned long size, mtrr_type type)
651 struct set_mtrr_data data;
653 data.smp_reg = reg;
654 data.smp_base = base;
655 data.smp_size = size;
656 data.smp_type = type;
657 do_all_cpus (set_mtrr_handler, &data, TRUE);
658 } /* End Function set_mtrr_smp */
661 /* A warning that is common to the module and non-module cases. */
662 /* Some BIOS's are fucked and don't set all MTRRs the same! */
663 #ifdef MODULE
664 static void mtrr_state_warn (unsigned long mask)
665 #else
666 __initfunc(static void mtrr_state_warn (unsigned long mask))
667 #endif
669 if (!mask) return;
670 if (mask & MTRR_CHANGE_MASK_FIXED)
671 printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
672 if (mask & MTRR_CHANGE_MASK_VARIABLE)
673 printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
674 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
675 printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
676 printk ("mtrr: probably your BIOS does not setup all CPUs\n");
677 } /* End Function mtrr_state_warn */
679 #ifdef MODULE
680 /* As a module, copy the MTRR state using an IPI handler. */
682 static volatile unsigned long smp_changes_mask = 0;
684 static void copy_mtrr_state_handler (struct set_mtrr_context *ctxt, void *info)
686 unsigned long mask, count;
687 struct mtrr_state *smp_mtrr_state = info;
689 mask = set_mtrr_state (smp_mtrr_state, ctxt);
690 /* Use the atomic bitops to update the global mask */
691 for (count = 0; count < sizeof mask * 8; ++count)
693 if (mask & 0x01) set_bit (count, &smp_changes_mask);
694 mask >>= 1;
696 } /* End Function copy_mtrr_state_handler */
698 /* Copies the entire MTRR state of this CPU to all the others. */
699 static void copy_mtrr_state (void)
701 struct mtrr_state ms;
703 get_mtrr_state (&ms);
704 do_all_cpus (copy_mtrr_state_handler, &ms, FALSE);
705 finalize_mtrr_state (&ms);
706 mtrr_state_warn (smp_changes_mask);
707 } /* End Function copy_mtrr_state */
709 #endif /* MODULE */
710 #endif /* __SMP__ */
712 static char *attrib_to_str (int x)
714 return (x <= 6) ? mtrr_strings[x] : "?";
715 } /* End Function attrib_to_str */
717 static void init_table (void)
719 int i, max;
721 max = get_num_var_ranges ();
722 if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
723 == NULL )
725 printk ("mtrr: could not allocate\n");
726 return;
728 for (i = 0; i < max; i++) usage_table[i] = 1;
729 #ifdef CONFIG_PROC_FS
730 if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
732 printk ("mtrr: could not allocate\n");
733 return;
735 ascii_buf_bytes = 0;
736 compute_ascii ();
737 #endif
738 } /* End Function init_table */
740 int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
741 char increment)
742 /* [SUMMARY] Add an MTRR entry.
743 <base> The starting (base) address of the region.
744 <size> The size (in bytes) of the region.
745 <type> The type of the new region.
746 <increment> If true and the region already exists, the usage count will be
747 incremented.
748 [RETURNS] The MTRR register on success, else a negative number indicating
749 the error code.
750 [NOTE] This routine uses a spinlock.
753 int i, max;
754 mtrr_type ltype;
755 unsigned long lbase, lsize, last;
757 if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
758 if ( (base & 0xfff) || (size & 0xfff) )
760 printk ("mtrr: size and base must be multiples of 4kB\n");
761 printk ("mtrr: size: %lx base: %lx\n", size, base);
762 return -EINVAL;
764 if (base + size < 0x100000)
766 printk ("mtrr: cannot set region below 1 MByte (0x%lx,0x%lx)\n",
767 base, size);
768 return -EINVAL;
770 /* Check upper bits of base and last are equal and lower bits are 0 for
771 base and 1 for last */
772 last = base + size - 1;
773 for (lbase = base; !(lbase & 1) && (last & 1);
774 lbase = lbase >> 1, last = last >> 1);
775 if (lbase != last)
777 printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
778 base, size);
779 return -EINVAL;
781 if (type >= MTRR_NUM_TYPES)
783 printk ("mtrr: type: %u illegal\n", type);
784 return -EINVAL;
786 /* If the type is WC, check that this processor supports it */
787 if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
789 printk ("mtrr: your processor doesn't support write-combining\n");
790 return -ENOSYS;
792 increment = increment ? 1 : 0;
793 max = get_num_var_ranges ();
794 /* Search for existing MTRR */
795 spin_lock (&main_lock);
796 for (i = 0; i < max; ++i)
798 get_mtrr (i, &lbase, &lsize, &ltype);
799 if (base >= lbase + lsize) continue;
800 if ( (base < lbase) && (base + size <= lbase) ) continue;
801 /* At this point we know there is some kind of overlap/enclosure */
802 if ( (base < lbase) || (base + size > lbase + lsize) )
804 spin_unlock (&main_lock);
805 printk ("mtrr: 0x%lx,0x%lx overlaps existing 0x%lx,0x%lx\n",
806 base, size, lbase, lsize);
807 return -EINVAL;
809 /* New region is enclosed by an existing region */
810 if (ltype != type)
812 if (type == MTRR_TYPE_UNCACHABLE) continue;
813 spin_unlock (&main_lock);
814 printk ( "mtrr: type mismatch for %lx,%lx old: %s new: %s\n",
815 base, size, attrib_to_str (ltype), attrib_to_str (type) );
816 return -EINVAL;
818 if (increment) ++usage_table[i];
819 compute_ascii ();
820 spin_unlock (&main_lock);
821 return i;
823 /* Search for an empty MTRR */
824 for (i = 0; i < max; ++i)
826 get_mtrr (i, &lbase, &lsize, &ltype);
827 if (lsize > 0) continue;
828 set_mtrr (i, base, size, type);
829 usage_table[i] = 1;
830 compute_ascii ();
831 spin_unlock (&main_lock);
832 return i;
834 spin_unlock (&main_lock);
835 printk ("mtrr: no more MTRRs available\n");
836 return -ENOSPC;
837 } /* End Function mtrr_add */
839 int mtrr_del (int reg, unsigned long base, unsigned long size)
840 /* [SUMMARY] Delete MTRR/decrement usage count.
841 <reg> The register. If this is less than 0 then <<base>> and <<size>> must
842 be supplied.
843 <base> The base address of the region. This is ignored if <<reg>> is >= 0.
844 <size> The size of the region. This is ignored if <<reg>> is >= 0.
845 [RETURNS] The register on success, else a negative number indicating
846 the error code.
847 [NOTE] This routine uses a spinlock.
850 int i, max;
851 mtrr_type ltype;
852 unsigned long lbase, lsize;
854 if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
855 max = get_num_var_ranges ();
856 spin_lock (&main_lock);
857 if (reg < 0)
859 /* Search for existing MTRR */
860 for (i = 0; i < max; ++i)
862 get_mtrr (i, &lbase, &lsize, &ltype);
863 if ( (lbase == base) && (lsize == size) )
865 reg = i;
866 break;
869 if (reg < 0)
871 spin_unlock (&main_lock);
872 printk ("mtrr: no MTRR for %lx,%lx found\n", base, size);
873 return -EINVAL;
876 if (reg >= max)
878 spin_unlock (&main_lock);
879 printk ("mtrr: register: %d too big\n", reg);
880 return -EINVAL;
882 get_mtrr (reg, &lbase, &lsize, &ltype);
883 if (lsize < 1)
885 spin_unlock (&main_lock);
886 printk ("mtrr: MTRR %d not used\n", reg);
887 return -EINVAL;
889 if (usage_table[reg] < 1)
891 spin_unlock (&main_lock);
892 printk ("mtrr: reg: %d has count=0\n", reg);
893 return -EINVAL;
895 if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
896 compute_ascii ();
897 spin_unlock (&main_lock);
898 return reg;
899 } /* End Function mtrr_del */
901 #ifdef CONFIG_PROC_FS
903 static int mtrr_file_add (unsigned long base, unsigned long size,
904 unsigned int type, char increment, struct file *file)
906 int reg, max;
907 unsigned int *fcount = file->private_data;
909 max = get_num_var_ranges ();
910 if (fcount == NULL)
912 if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
914 printk ("mtrr: could not allocate\n");
915 return -ENOMEM;
917 memset (fcount, 0, max * sizeof *fcount);
918 file->private_data = fcount;
920 reg = mtrr_add (base, size, type, 1);
921 if (reg >= 0) ++fcount[reg];
922 return reg;
923 } /* End Function mtrr_file_add */
925 static int mtrr_file_del (unsigned long base, unsigned long size,
926 struct file *file)
928 int reg;
929 unsigned int *fcount = file->private_data;
931 reg = mtrr_del (-1, base, size);
932 if (reg < 0) return reg;
933 if (fcount != NULL) --fcount[reg];
934 return reg;
935 } /* End Function mtrr_file_del */
937 static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
938 loff_t *ppos)
940 if (*ppos >= ascii_buf_bytes) return 0;
941 if (*ppos + len > ascii_buf_bytes) len = ascii_buf_bytes - *ppos;
942 if ( copy_to_user (buf, ascii_buffer + *ppos, len) ) return -EFAULT;
943 *ppos += len;
944 return len;
945 } /* End Function mtrr_read */
947 static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
948 loff_t *ppos)
949 /* Format of control line:
950 "base=%lx size=%lx type=%s" OR:
951 "disable=%d"
954 int i, err;
955 unsigned long reg, base, size;
956 char *ptr;
957 char line[LINE_SIZE];
959 if ( !suser () ) return -EPERM;
960 /* Can't seek (pwrite) on this device */
961 if (ppos != &file->f_pos) return -ESPIPE;
962 memset (line, 0, LINE_SIZE);
963 if (len > LINE_SIZE) len = LINE_SIZE;
964 if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
965 ptr = line + strlen (line) - 1;
966 if (*ptr == '\n') *ptr = '\0';
967 if ( !strncmp (line, "disable=", 8) )
969 reg = simple_strtoul (line + 8, &ptr, 0);
970 err = mtrr_del (reg, 0, 0);
971 if (err < 0) return err;
972 return len;
974 if ( strncmp (line, "base=", 5) )
976 printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
977 return -EINVAL;
979 base = simple_strtoul (line + 5, &ptr, 0);
980 for (; isspace (*ptr); ++ptr);
981 if ( strncmp (ptr, "size=", 5) )
983 printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
984 return -EINVAL;
986 size = simple_strtoul (ptr + 5, &ptr, 0);
987 for (; isspace (*ptr); ++ptr);
988 if ( strncmp (ptr, "type=", 5) )
990 printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
991 return -EINVAL;
993 ptr += 5;
994 for (; isspace (*ptr); ++ptr);
995 for (i = 0; i < MTRR_NUM_TYPES; ++i)
997 if ( strcmp (ptr, mtrr_strings[i]) ) continue;
998 err = mtrr_add (base, size, i, 1);
999 if (err < 0) return err;
1000 return len;
1002 printk ("mtrr: illegal type: \"%s\"\n", ptr);
1003 return -EINVAL;
1004 } /* End Function mtrr_write */
1006 static int mtrr_ioctl (struct inode *inode, struct file *file,
1007 unsigned int cmd, unsigned long arg)
1009 int err;
1010 mtrr_type type;
1011 struct mtrr_sentry sentry;
1012 struct mtrr_gentry gentry;
1014 switch (cmd)
1016 default:
1017 return -ENOIOCTLCMD;
1018 case MTRRIOC_ADD_ENTRY:
1019 if ( !suser () ) return -EPERM;
1020 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1021 return -EFAULT;
1022 err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file);
1023 if (err < 0) return err;
1024 break;
1025 case MTRRIOC_SET_ENTRY:
1026 if ( !suser () ) return -EPERM;
1027 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1028 return -EFAULT;
1029 err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
1030 if (err < 0) return err;
1031 break;
1032 case MTRRIOC_DEL_ENTRY:
1033 if ( !suser () ) return -EPERM;
1034 if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
1035 return -EFAULT;
1036 err = mtrr_file_del (sentry.base, sentry.size, file);
1037 if (err < 0) return err;
1038 break;
1039 case MTRRIOC_GET_ENTRY:
1040 if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
1041 return -EFAULT;
1042 if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
1043 get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type);
1044 gentry.type = type;
1045 if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
1046 return -EFAULT;
1047 break;
1049 return 0;
1050 } /* End Function mtrr_ioctl */
1052 static int mtrr_open (struct inode *ino, struct file *filep)
1054 MOD_INC_USE_COUNT;
1055 return 0;
1056 } /* End Function mtrr_open */
1058 static int mtrr_close (struct inode *ino, struct file *file)
1060 int i, max;
1061 unsigned int *fcount = file->private_data;
1063 MOD_DEC_USE_COUNT;
1064 if (fcount == NULL) return 0;
1065 max = get_num_var_ranges ();
1066 for (i = 0; i < max; ++i)
1068 while (fcount[i] > 0)
1070 if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
1071 --fcount[i];
1074 kfree (fcount);
1075 file->private_data = NULL;
1076 return 0;
1077 } /* End Function mtrr_close */
1079 static struct file_operations mtrr_fops =
1081 NULL, /* Seek */
1082 mtrr_read, /* Read */
1083 mtrr_write, /* Write */
1084 NULL, /* Readdir */
1085 NULL, /* Poll */
1086 mtrr_ioctl, /* IOctl */
1087 NULL, /* MMAP */
1088 mtrr_open, /* Open */
1089 NULL, /* Flush */
1090 mtrr_close, /* Release */
1091 NULL, /* Fsync */
1092 NULL, /* Fasync */
1093 NULL, /* CheckMediaChange */
1094 NULL, /* Revalidate */
1095 NULL, /* Lock */
1098 static struct inode_operations proc_mtrr_inode_operations = {
1099 &mtrr_fops, /* default property file-ops */
1100 NULL, /* create */
1101 NULL, /* lookup */
1102 NULL, /* link */
1103 NULL, /* unlink */
1104 NULL, /* symlink */
1105 NULL, /* mkdir */
1106 NULL, /* rmdir */
1107 NULL, /* mknod */
1108 NULL, /* rename */
1109 NULL, /* readlink */
1110 NULL, /* follow_link */
1111 NULL, /* readpage */
1112 NULL, /* writepage */
1113 NULL, /* bmap */
1114 NULL, /* truncate */
1115 NULL /* permission */
1118 static struct proc_dir_entry proc_root_mtrr = {
1119 PROC_MTRR, 4, "mtrr",
1120 S_IFREG | S_IWUSR | S_IRUGO, 1, 0, 0,
1121 0, &proc_mtrr_inode_operations
1124 static void compute_ascii (void)
1126 char factor;
1127 int i, max;
1128 mtrr_type type;
1129 unsigned long base, size;
1131 ascii_buf_bytes = 0;
1132 max = get_num_var_ranges ();
1133 for (i = 0; i < max; i++)
1135 get_mtrr (i, &base, &size, &type);
1136 if (size < 1) usage_table[i] = 0;
1137 else
1139 if (size < 0x100000)
1141 /* 1MB */
1142 factor = 'k';
1143 size >>= 10;
1145 else
1147 factor = 'M';
1148 size >>= 20;
1150 sprintf
1151 (ascii_buffer + ascii_buf_bytes,
1152 "reg%02i: base=0x%08lx (%4liMB), size=%4li%cB: %s, count=%d\n",
1153 i, base, base>>20, size, factor,
1154 attrib_to_str (type), usage_table[i]);
1155 ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
1158 proc_root_mtrr.size = ascii_buf_bytes;
1159 } /* End Function compute_ascii */
1161 #endif /* CONFIG_PROC_FS */
1163 EXPORT_SYMBOL(mtrr_add);
1164 EXPORT_SYMBOL(mtrr_del);
1166 #if defined(__SMP__) && !defined(MODULE)
/* One bit per MTRR register that was found to differ from the boot
   CPU's state while bringing up secondary CPUs (set via set_bit below) */
static volatile unsigned long smp_changes_mask __initdata = 0;
/* MTRR state captured from the boot CPU and imposed on secondaries */
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};
1171 __initfunc(void mtrr_init_boot_cpu (void))
1173 if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
1174 printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
1176 get_mtrr_state (&smp_mtrr_state);
1177 } /* End Function mtrr_init_boot_cpu */
1179 __initfunc(void mtrr_init_secondary_cpu (void))
1181 unsigned long mask, count;
1182 struct set_mtrr_context ctxt;
1184 if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
1185 /* Note that this is not ideal, since the cache is only flushed/disabled
1186 for this CPU while the MTRRs are changed, but changing this requires
1187 more invasive changes to the way the kernel boots */
1188 set_mtrr_prepare (&ctxt);
1189 mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
1190 set_mtrr_done (&ctxt);
1191 /* Use the atomic bitops to update the global mask */
1192 for (count = 0; count < sizeof mask * 8; ++count)
1194 if (mask & 0x01) set_bit (count, &smp_changes_mask);
1195 mask >>= 1;
1197 } /* End Function mtrr_init_secondary_cpu */
1199 #endif
1201 #ifdef MODULE
1202 int init_module (void)
1203 #else
1204 __initfunc(int mtrr_init(void))
1205 #endif
1207 if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0;
1208 # if !defined(__SMP__) || defined(MODULE)
1209 printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
1210 # endif
1212 # ifdef __SMP__
1213 # ifdef MODULE
1214 copy_mtrr_state ();
1215 # else /* MODULE */
1216 finalize_mtrr_state (&smp_mtrr_state);
1217 mtrr_state_warn (smp_changes_mask);
1218 # endif /* MODULE */
1219 # endif /* __SMP__ */
1221 # ifdef CONFIG_PROC_FS
1222 proc_register (&proc_root, &proc_root_mtrr);
1223 # endif
1225 init_table ();
1226 return 0;
#ifdef MODULE
void cleanup_module (void)
/*  Module unload: tear down the /proc entry and detach the SMP hook.
    Nothing to do when the CPU never had MTRR support.  */
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
#  ifdef CONFIG_PROC_FS
    proc_unregister (&proc_root, PROC_MTRR);
#  endif
#  ifdef __SMP__
    mtrr_hook = NULL;
#  endif
}
#endif