/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    ChangeLog

    Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
               Initial register-setting code (from proform-1.0).
    19971216   Richard Gooch <rgooch@atnf.csiro.au>
               Original version for /proc/mtrr interface, SMP-safe.
  v1.0
    19971217   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for ioctl()s.
               Added sample code in Documentation/mtrr.txt
  v1.1
    19971218   Richard Gooch <rgooch@atnf.csiro.au>
               Disallow overlapping regions.
    19971219   Jens Maurer <jmaurer@menuett.rhein-main.de>
               Register-setting fixups.
  v1.2
    19971222   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.75.
  v1.3
    19971229   David Wragg <dpw@doc.ic.ac.uk>
               Register-setting fixups and conformity with Intel conventions.
    19971229   Richard Gooch <rgooch@atnf.csiro.au>
               Cosmetic changes and wrote this ChangeLog ;-)
    19980106   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.78.
  v1.4
    19980119   David Wragg <dpw@doc.ic.ac.uk>
               Included passive-release enable code (elsewhere in PCI setup).
  v1.5
    19980131   Richard Gooch <rgooch@atnf.csiro.au>
               Replaced global kernel lock with private spinlock.
  v1.6
    19980201   Richard Gooch <rgooch@atnf.csiro.au>
               Added wait for other CPUs to complete changes.
  v1.7
    19980202   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix in definition of <set_mtrr> for UP.
  v1.8
    19980319   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.90.
    19980323   Richard Gooch <rgooch@atnf.csiro.au>
               Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
  v1.9
    19980325   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed test for overlapping regions: confused by adjacent regions
    19980326   Richard Gooch <rgooch@atnf.csiro.au>
               Added wbinvd in <set_mtrr_prepare>.
    19980401   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for non-SMP compilation.
    19980418   David Wragg <dpw@doc.ic.ac.uk>
               Fixed-MTRR synchronisation for SMP and use atomic operations
               instead of spinlocks.
    19980418   Richard Gooch <rgooch@atnf.csiro.au>
               Differentiate different MTRR register classes for BIOS fixup.
  v1.10
    19980419   David Wragg <dpw@doc.ic.ac.uk>
               Bug fix in variable MTRR synchronisation.
  v1.11
    19980419   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.97.
  v1.12
    19980421   Richard Gooch <rgooch@atnf.csiro.au>
               Safer synchronisation across CPUs when changing MTRRs.
  v1.13
    19980423   Richard Gooch <rgooch@atnf.csiro.au>
               Bugfix for SMP systems without MTRR support.
  v1.14
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
               Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
  v1.15
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
               Use atomic bitops for setting SMP change mask.
  v1.16
    19980428   Richard Gooch <rgooch@atnf.csiro.au>
               Removed spurious diagnostic message.
  v1.17
    19980429   Richard Gooch <rgooch@atnf.csiro.au>
               Moved register-setting macros into this file.
               Moved setup code from init/main.c to i386-specific areas.
  v1.18
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
               Moved MTRR detection outside conditionals in <mtrr_init>.
  v1.19
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
               Documentation improvement: mention Pentium II and AGP.
  v1.20
    19980521   Richard Gooch <rgooch@atnf.csiro.au>
               Only manipulate interrupt enable flag on local CPU.
               Allow enclosed uncachable regions.
  v1.21
    19980611   Richard Gooch <rgooch@atnf.csiro.au>
               Always define <main_lock>.
  v1.22
    19980901   Richard Gooch <rgooch@atnf.csiro.au>
               Removed module support in order to tidy up code.
               Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
               Created addition queue for prior to SMP commence.
  v1.23
    19980902   Richard Gooch <rgooch@atnf.csiro.au>
               Ported patch to kernel 2.1.120-pre3.
  v1.24
    19980910   Richard Gooch <rgooch@atnf.csiro.au>
               Removed sanity checks and addition queue: Linus prefers an OOPS.
  v1.25
    19981001   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed harmless compiler warning in include/asm-i386/mtrr.h
               Fixed version numbering and history for v1.23 -> v1.24.
  v1.26
    19990118   Richard Gooch <rgooch@atnf.csiro.au>
               Added devfs support.
  v1.27
    19990123   Richard Gooch <rgooch@atnf.csiro.au>
               Changed locking to spin with reschedule.
               Made use of new <smp_call_function>.
  v1.28
    19990201   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Extended the driver to be able to use Cyrix style ARRs.
    19990204   Richard Gooch <rgooch@atnf.csiro.au>
               Restructured Cyrix support.
  v1.29
    19990204   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Refined ARR support: enable MAPEN in set_mtrr_prepare()
               and disable MAPEN in set_mtrr_done().
    19990205   Richard Gooch <rgooch@atnf.csiro.au>
               Minor cleanups.
  v1.30
    19990208   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Protect plain 6x86s (and other processors without the
               Page Global Enable feature) against accessing CR4 in
               set_mtrr_prepare() and set_mtrr_done().
    19990210   Richard Gooch <rgooch@atnf.csiro.au>
               Turned <set_mtrr_up> and <get_mtrr> into function pointers.
  v1.31
    19990212   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Major rewrite of cyrix_arr_init(): do not touch ARRs,
               leave them as the BIOS has set them up.
               Enable usage of all 8 ARRs.
               Avoid multiplications by 3 everywhere and other
               code clean ups/speed ups.
    19990213   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Set up other Cyrix processors identically to the boot CPU.
               Since Cyrix don't support Intel APIC, this is l'art pour l'art.
               Weigh ARRs by size:
               If size <= 32M is given, set up ARR# we were given.
               If size > 32M is given, set up ARR7 only if it is free,
               fail otherwise.
    19990214   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Also check for size >= 256K if we are to set up ARR7;
               mtrr_add() returns the value it gets from set_mtrr().
    19990218   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Remove Cyrix "coma bug" workaround from here.
               Moved to linux/arch/i386/kernel/setup.c and
               linux/include/asm-i386/bugs.h
    19990228   Richard Gooch <rgooch@atnf.csiro.au>
               Added MTRRIOC_KILL_ENTRY ioctl(2)
               Trap for counter underflow in <mtrr_file_del>.
               Trap for 4 MiB aligned regions for PPro, stepping <= 7.
    19990301   Richard Gooch <rgooch@atnf.csiro.au>
               Created <get_free_region> hook.
    19990305   Richard Gooch <rgooch@atnf.csiro.au>
               Temporarily disable AMD support now MTRR capability flag is set.
  v1.32
    19990308   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Adjust my changes (19990212-19990218) to Richard Gooch's
               latest changes (19990228-19990305).
  v1.33
    19990309   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed typo in <printk> message.
    19990310   Richard Gooch <rgooch@atnf.csiro.au>
               Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
  v1.34
    19990511   Bart Hartgers <bart@etpmod.phys.tue.nl>
               Support Centaur C6 MCR's.
    19990512   Richard Gooch <rgooch@atnf.csiro.au>
               Minor cleanups.
  v1.35
    19990707   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Check whether ARR3 is protected in cyrix_get_free_region()
               and mtrr_del(). The code won't attempt to delete or change it
               from now on if the BIOS protected ARR3. It silently skips ARR3
               in cyrix_get_free_region() or returns with an error code from
               mtrr_del().
    19990711   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
               if ARR3 isn't protected. This is needed because if SMM is active
               and ARR3 isn't protected then deleting and setting ARR3 again
               may lock up the processor. With SMM entirely disabled, it does
               not happen.
    19990812   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Rearrange switch() statements so the driver accommodates the
               fact that the AMD Athlon handles its MTRRs the same way
               as Intel does.
    19990814   Zoltán Böszörményi <zboszor@mail.externet.hu>
               Double check for Intel in mtrr_add()'s big switch() because
               that revision check is only valid for Intel CPUs.
    19990819   Alan Cox <alan@redhat.com>
               Tested Zoltan's changes on a pre-production Athlon - 100%
               success.
    19991008   Manfred Spraul <manfreds@colorfullife.com>
               Replaced spin_lock_reschedule() with a normal semaphore.
  v1.36
    20000221   Richard Gooch <rgooch@atnf.csiro.au>
               Compile fix if procfs and devfs not enabled.
               Formatting changes.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#define MTRR_NEED_STRINGS
#include <asm/mtrr.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/msr.h>

#include <asm/hardirq.h>
#include <linux/irq.h>

#define MTRR_VERSION "1.36 (20000221)"

#define TRUE  1
#define FALSE 0

#define MTRRcap_MSR     0x0fe
#define MTRRdefType_MSR 0x2ff

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
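/*  The eleven fixed-range MSRs above each pack eight one-byte range
    types, hence NUM_FIXED_RANGES = 11 * 8 = 88: one 64K MSR covers
    0-512K, two 16K MSRs cover 512K-768K and eight 4K MSRs cover
    768K-1M  */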
#ifdef __SMP__
# define MTRR_CHANGE_MASK_FIXED     0x01
# define MTRR_CHANGE_MASK_VARIABLE  0x02
# define MTRR_CHANGE_MASK_DEFTYPE   0x04
#endif

/*  In the Intel processor's MTRR interface, the MTRR type is always held in
    an 8 bit field:  */
typedef u8 mtrr_type;

#define LINE_SIZE      80
#define JIFFIE_TIMEOUT 100

#ifdef __SMP__
# define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
# define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
						       TRUE)
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
# define USERSPACE_INTERFACE
#endif

#ifndef USERSPACE_INTERFACE
# define compute_ascii() while (0)
#endif

#ifdef USERSPACE_INTERFACE
static char *ascii_buffer = NULL;
static unsigned int ascii_buf_bytes = 0;
#endif
static unsigned int *usage_table = NULL;
static DECLARE_MUTEX(main_lock);

/*  Private functions  */
#ifdef USERSPACE_INTERFACE
static void compute_ascii (void);
#endif


struct set_mtrr_context
{
    unsigned long flags;
    unsigned long deftype_lo;
    unsigned long deftype_hi;
    unsigned long cr4val;
    unsigned long ccr3;
};

static int arr3_protected;

/*  Put the processor into a state where MTRRs can be safely set  */
static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    /*  Disable interrupts locally  */
    __save_flags (ctxt->flags); __cli ();

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 >= 6) break;  /*  Athlon and post-Athlon CPUs  */
	/*  else fall through  */
      case X86_VENDOR_CENTAUR:
	return;
	/*break;*/
    }
    /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
	asm volatile ("movl  %%cr4, %0\n\t"
		      "movl  %0, %1\n\t"
		      "andb  $0x7f, %b1\n\t"
		      "movl  %1, %%cr4\n\t"
		      : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");

    /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
	a side-effect  */
    asm volatile ("movl  %%cr0, %0\n\t"
		  "orl   $0x40000000, %0\n\t"
		  "wbinvd\n\t"
		  "movl  %0, %%cr0\n\t"
		  "wbinvd\n\t"
		  : "=r" (tmp) : : "memory");

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
      case X86_VENDOR_INTEL:
	/*  Disable MTRRs, and set the default type to uncached  */
	rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
	break;
      case X86_VENDOR_CYRIX:
	tmp = getCx86 (CX86_CCR3);
	setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10);
	ctxt->ccr3 = tmp;
	break;
    }
}   /*  End Function set_mtrr_prepare  */

/*  Restore the processor after a set_mtrr_prepare  */
static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 >= 6) break;  /*  Athlon and post-Athlon CPUs  */
	/*  else fall through  */
      case X86_VENDOR_CENTAUR:
	__restore_flags (ctxt->flags);
	return;
	/*break;*/
    }
    /*  Flush caches and TLBs  */
    asm volatile ("wbinvd" : : : "memory" );

    /*  Restore MTRRdefType  */
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
      case X86_VENDOR_INTEL:
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
	break;
      case X86_VENDOR_CYRIX:
	setCx86 (CX86_CCR3, ctxt->ccr3);
	break;
    }

    /*  Enable caches  */
    asm volatile ("movl  %%cr0, %0\n\t"
		  "andl  $0xbfffffff, %0\n\t"
		  "movl  %0, %%cr0\n\t"
		  : "=r" (tmp) : : "memory");

    /*  Restore value of CR4  */
    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
	asm volatile ("movl  %0, %%cr4"
		      : : "r" (ctxt->cr4val) : "memory");

    /*  Re-enable interrupts locally (if enabled previously)  */
    __restore_flags (ctxt->flags);
}   /*  End Function set_mtrr_done  */

/*  This function returns the number of variable MTRRs  */
static unsigned int get_num_var_ranges (void)
{
    unsigned long config, dummy;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6) return 2;  /*  pre-Athlon CPUs  */
	/*  else fall through  */
      case X86_VENDOR_INTEL:
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & 0xff);
	/*break;*/
      case X86_VENDOR_CYRIX:
	/*  Cyrix have 8 ARRs  */
      case X86_VENDOR_CENTAUR:
	/*  and Centaur has 8 MCR's  */
	return 8;
	/*break;*/
    }
    return 0;
}   /*  End Function get_num_var_ranges  */

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb (void)
{
    unsigned long config, dummy;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6) return 1;  /*  pre-Athlon CPUs  */
	/*  else fall through  */
      case X86_VENDOR_INTEL:
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & (1<<10));
	/*break;*/
      case X86_VENDOR_CYRIX:
      case X86_VENDOR_CENTAUR:
	return 1;
	/*break;*/
    }
    return 0;
}   /*  End Function have_wrcomb  */

static void intel_get_mtrr (unsigned int reg, unsigned long *base,
			    unsigned long *size, mtrr_type *type)
{
    unsigned long dummy, mask_lo, base_lo;

    rdmsr (MTRRphysMask_MSR(reg), mask_lo, dummy);
    if ( (mask_lo & 0x800) == 0 )
    {
	/*  Invalid (i.e. free) range  */
	*base = 0;
	*size = 0;
	*type = 0;
	return;
    }

    rdmsr (MTRRphysBase_MSR(reg), base_lo, dummy);

    /*  We ignore the extra address bits (32-35). If someone wants to
	run x86 Linux on a machine with >4GB memory, this will be the
	least of their problems.  */

    /*  Clean up mask_lo so it gives the real address mask.  */
    mask_lo = (mask_lo & 0xfffff000UL);
    /*  This works correctly if size is a power of two, i.e. a
	contiguous range.  */
    *size = ~(mask_lo - 1);
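    /*  Example (illustrative values): for a 4 MiB region the mask read
	back is mask_lo = 0xffc00000, so ~(mask_lo - 1) = ~0xffbfffff
	= 0x00400000 = 4 MiB  */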
    *base = (base_lo & 0xfffff000UL);
    *type = (base_lo & 0xff);
}   /*  End Function intel_get_mtrr  */

static void cyrix_get_arr (unsigned int reg, unsigned long *base,
			   unsigned long *size, mtrr_type *type)
{
    unsigned long flags;
    unsigned char arr, ccr3, rcr, shift;

    arr = CX86_ARR_BASE + (reg << 1) + reg;  /*  avoid multiplication by 3  */

    /*  Save flags and disable interrupts  */
    __save_flags (flags); __cli ();

    ccr3 = getCx86 (CX86_CCR3);
    setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);  /*  enable MAPEN  */
    ((unsigned char *) base)[3] = getCx86 (arr);
    ((unsigned char *) base)[2] = getCx86 (arr+1);
    ((unsigned char *) base)[1] = getCx86 (arr+2);
    rcr = getCx86 (CX86_RCR_BASE + reg);
    setCx86 (CX86_CCR3, ccr3);  /*  disable MAPEN  */

    /*  Enable interrupts if it was enabled previously  */
    __restore_flags (flags);
    shift = ((unsigned char *) base)[1] & 0x0f;
    *base &= 0xfffff000UL;

    /*  Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     *  Note: shift==0xf means 4G, this is unsupported.
     */
    if (shift)
	*size = (reg < 7 ? 0x800UL : 0x20000UL) << shift;
    else
	*size = 0;

    /*  Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6  */
    if (reg < 7)
    {
	switch (rcr)
	{
	  case  1: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRBACK;     break;
	  case  9: *type = MTRR_TYPE_WRCOMB;     break;
	  case 24:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    } else
    {
	switch (rcr)
	{
	  case  0: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRCOMB;     break;
	  case  9: *type = MTRR_TYPE_WRBACK;     break;
	  case 25:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    }
}   /*  End Function cyrix_get_arr  */

static void amd_get_mtrr (unsigned int reg, unsigned long *base,
			  unsigned long *size, mtrr_type *type)
{
    unsigned long low, high;

    rdmsr (0xC0000085, low, high);
    /*  Upper dword is region 1, lower is region 0  */
    if (reg == 1) low = high;
    /*  The base masks off on the right alignment  */
    *base = low & 0xFFFE0000;
    *type = 0;
    if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
    if (low & 2) *type = MTRR_TYPE_WRCOMB;
    if ( !(low & 3) )
    {
	*size = 0;
	return;
    }
    /*
     *	This needs a little explaining. The size is stored as an
     *	inverted mask of bits of 128K granularity 15 bits long offset
     *	2 bits
     *
     *	So to get a size we do invert the mask and add 1 to the lowest
     *	mask bit (4 as it's 2 bits in). This gives us a size we then shift
     *	to turn into 128K blocks
     *
     *	eg		111 1111 1111 1100      is 512K
     *
     *	invert		000 0000 0000 0011
     *	+1		000 0000 0000 0100
     *	*128K	...
     */
    low = (~low) & 0x1FFFC;
    *size = (low + 4) << 15;
    return;
}   /*  End Function amd_get_mtrr  */

static struct
{
    unsigned long high;
    unsigned long low;
} centaur_mcr[8];

static void centaur_get_mcr (unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
    *base = centaur_mcr[reg].high & 0xfffff000;
    *size = (~(centaur_mcr[reg].low & 0xfffff000))+1;
    *type = MTRR_TYPE_WRCOMB;  /*  If it is there, it is write-combining  */
}   /*  End Function centaur_get_mcr  */

static void (*get_mtrr) (unsigned int reg, unsigned long *base,
			 unsigned long *size, mtrr_type *type) = NULL;

static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
			       unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_context ctxt;

    if (do_safe) set_mtrr_prepare (&ctxt);
    if (size == 0)
    {
	/*  The invalid bit is kept in the mask, so we simply clear the
	    relevant mask register to disable a range.  */
	wrmsr (MTRRphysMask_MSR (reg), 0, 0);
    }
    else
    {
	wrmsr (MTRRphysBase_MSR (reg), base | type, 0);
	wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0);
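	/*  Example (illustrative values): base=64 MiB, size=4 MiB,
	    type=write-combining (1) writes PhysBase = 0x04000001 and
	    PhysMask = ~(0x400000 - 1) | 0x800 = 0xffc00800, i.e. the
	    inverse of the get path above  */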
    }
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function intel_set_mtrr_up  */

static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
			      unsigned long size, mtrr_type type, int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned char arr, arr_type, arr_size;

    arr = CX86_ARR_BASE + (reg << 1) + reg;  /*  avoid multiplication by 3  */

    /*  count down from 32M (ARR0-ARR6) or from 2G (ARR7)  */
    size >>= (reg < 7 ? 12 : 18);
    size &= 0x7fff;  /*  make sure arr_size <= 14  */
    for (arr_size = 0; size; arr_size++, size >>= 1);
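    /*  Example (illustrative values): a 128 KiB request on ARR0-ARR6
	gives size >> 12 = 0x20; the loop above counts its bit length,
	yielding arr_size = 6, which cyrix_get_arr() inverts back as
	0x800UL << 6 = 128 KiB  */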
    if (reg < 7)
    {
	switch (type) {
	  case MTRR_TYPE_UNCACHABLE: arr_type =  1; break;
	  case MTRR_TYPE_WRCOMB:     arr_type =  9; break;
	  case MTRR_TYPE_WRTHROUGH:  arr_type = 24; break;
	  default:                   arr_type =  8; break;
	}
    }
    else
    {
	switch (type)
	{
	  case MTRR_TYPE_UNCACHABLE: arr_type =  0; break;
	  case MTRR_TYPE_WRCOMB:     arr_type =  8; break;
	  case MTRR_TYPE_WRTHROUGH:  arr_type = 25; break;
	  default:                   arr_type =  9; break;
	}
    }

    if (do_safe) set_mtrr_prepare (&ctxt);
    setCx86 (arr,    ((unsigned char *) &base)[3]);
    setCx86 (arr+1,  ((unsigned char *) &base)[2]);
    setCx86 (arr+2, (((unsigned char *) &base)[1]) | arr_size);
    setCx86 (CX86_RCR_BASE + reg, arr_type);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function cyrix_set_arr_up  */

static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    u32 low, high;
    struct set_mtrr_context ctxt;

    if (do_safe) set_mtrr_prepare (&ctxt);
    /*
     *	Low is MTRR0, High MTRR 1
     */
    rdmsr (0xC0000085, low, high);
    /*
     *	Blank to disable
     */
    if (size == 0)
	*(reg ? &high : &low) = 0;
    else
	/*  Set the register to the base (already shifted for us), the
	    type (off by one) and an inverted bitmask of the size
	    The size is the only odd bit. We are fed, say, 512K
	    We invert this and we get 111 1111 1111 1011 but
	    if you subtract one and invert you get the desired
	    111 1111 1111 1100 mask
	 */
	*(reg ? &high : &low) = (((~(size-1)) >> 15) & 0x0001FFFC) | base | (type+1);
    /*
     *	The writeback rule is quite specific. See the manual. It is:
     *	disable local interrupts, write back the cache, set the mtrr
     */
    __asm__ __volatile__ ("wbinvd" : : : "memory");
    wrmsr (0xC0000085, low, high);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function amd_set_mtrr_up  */


static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type,
				int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned long low, high;

    if (do_safe) set_mtrr_prepare( &ctxt );
    if (size == 0)
    {
	/*  Disable  */
	high = low = 0;
    }
    else
    {
	high = base & 0xfffff000;  /*  base works on 4K pages...  */
	low = ((~(size-1)) & 0xfffff000);
	low |= 0x1f;  /*  only support write-combining...  */
    }
    centaur_mcr[reg].high = high;
    centaur_mcr[reg].low = low;
    wrmsr (0x110 + reg, low, high);
    if (do_safe) set_mtrr_done( &ctxt );
}   /*  End Function centaur_set_mcr_up  */

static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
			    unsigned long size, mtrr_type type,
			    int do_safe) = NULL;

#ifdef __SMP__

struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};


/*  Get the MSR pair relating to a var range  */
static void __init get_mtrr_var_range (unsigned int index,
				       struct mtrr_var_range *vr)
{
    rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
    rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
}   /*  End Function get_mtrr_var_range  */


/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static int __init set_mtrr_var_range_testing (unsigned int index,
					      struct mtrr_var_range *vr)
{
    unsigned int lo, hi;
    int changed = FALSE;

    rdmsr (MTRRphysBase_MSR(index), lo, hi);
    if ( (vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	 || (vr->base_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr (MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	changed = TRUE;
    }

    rdmsr (MTRRphysMask_MSR(index), lo, hi);

    if ( (vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr (MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
	changed = TRUE;
    }
    return changed;
}   /*  End Function set_mtrr_var_range_testing  */

static void __init get_fixed_ranges(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int i;
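    /*  Each rdmsr below yields two 32-bit words, i.e. eight of the
	one-byte fixed-range types, so the eleven MSRs fill all
	NUM_FIXED_RANGES (88) entries of frs[]  */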
    rdmsr (MTRRfix64K_00000_MSR, p[0], p[1]);

    for (i = 0; i < 2; i++)
	rdmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
    for (i = 0; i < 8; i++)
	rdmsr (MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
}   /*  End Function get_fixed_ranges  */

static int __init set_fixed_ranges_testing(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int changed = FALSE;
    int i;
    unsigned long lo, hi;

    rdmsr (MTRRfix64K_00000_MSR, lo, hi);
    if (p[0] != lo || p[1] != hi)
    {
	wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
	changed = TRUE;
    }

    for (i = 0; i < 2; i++)
    {
	rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
	if (p[2 + i*2] != lo || p[3 + i*2] != hi)
	{
	    wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
	    changed = TRUE;
	}
    }

    for (i = 0; i < 8; i++)
    {
	rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
	if (p[6 + i*2] != lo || p[7 + i*2] != hi)
	{
	    wrmsr (MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
	    changed = TRUE;
	}
    }
    return changed;
}   /*  End Function set_fixed_ranges_testing  */

struct mtrr_state
{
    unsigned int num_var_ranges;
    struct mtrr_var_range *var_ranges;
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;
    mtrr_type def_type;
};


/*  Grab all of the MTRR state for this CPU into *state  */
static void __init get_mtrr_state(struct mtrr_state *state)
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges ();
    vrs = state->var_ranges
	= kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL)
	nvrs = state->num_var_ranges = 0;

    for (i = 0; i < nvrs; i++)
	get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /*  End Function get_mtrr_state  */


/*  Free resources associated with a struct mtrr_state  */
static void __init finalize_mtrr_state(struct mtrr_state *state)
{
    if (state->var_ranges) kfree (state->var_ranges);
}   /*  End Function finalize_mtrr_state  */


static unsigned long __init set_mtrr_state (struct mtrr_state *state,
					    struct set_mtrr_context *ctxt)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
	if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
	    change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing (state->fixed_ranges) )
	change_mask |= MTRR_CHANGE_MASK_FIXED;
    /*  Set_mtrr_restore restores the old value of MTRRdefType,
	so to set it we fiddle with the saved value  */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
	 || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled)
    {
	ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
	change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /*  End Function set_mtrr_state  */


static atomic_t undone_count;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;
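/*  The rendezvous protocol used by set_mtrr_smp() below: the master
    sets undone_count to the number of other CPUs and fires an IPI;
    each handler decrements the count once its cache is flushed and
    disabled, and the two wait_barrier_* flags then release the other
    CPUs in lock-step, first to program their MTRRs, then to re-enable
    their caches  */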
struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};

static void ipi_handler (void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /*  Notify master that I've flushed and disabled my cache  */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) barrier ();
    /*  The master has cleared me to execute  */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
		    data->smp_type, FALSE);
    /*  Notify master CPU that I've executed the function  */
    atomic_dec (&undone_count);
    /*  Wait for master to clear me to enable cache and return  */
    while (wait_barrier_cache_enable) barrier ();
    set_mtrr_done (&ctxt);
}   /*  End Function ipi_handler  */

static void set_mtrr_smp (unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /*  Start the ball rolling on other CPUs  */
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
	panic ("mtrr: timed out waiting for other CPUs\n");
    /*  Flush and disable the local CPU's cache  */
    set_mtrr_prepare (&ctxt);
    /*  Wait for all other CPUs to flush and disable their caches  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Set up for completion wait and then release other CPUs to change MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /*  Now wait for other CPUs to complete the function  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Now all CPUs should have finished the function. Release the barrier to
	allow them to re-enable their caches and return from their interrupt,
	then enable the local cache and return  */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /*  End Function set_mtrr_smp  */

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
static void __init mtrr_state_warn(unsigned long mask)
{
    if (!mask) return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
	printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
	printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
	printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk ("mtrr: probably your BIOS does not set up all CPUs\n");
}   /*  End Function mtrr_state_warn  */

#endif  /*  __SMP__  */

static char *attrib_to_str (int x)
{
    return (x <= 6) ? mtrr_strings[x] : "?";
}   /*  End Function attrib_to_str  */

static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
	 == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef USERSPACE_INTERFACE
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
}   /*  End Function init_table  */
static int generic_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative number
    indicating the error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (lsize < 1) return i;
    }
    return -ENOSPC;
}   /*  End Function generic_get_free_region  */

static int cyrix_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative number
    indicating the error code.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /*  If we are to set up a region >32M then look at ARR7 immediately  */
    if (size > 0x2000000UL)
    {
	cyrix_get_arr (7, &lbase, &lsize, &ltype);
	if (lsize < 1) return 7;
	/*  Else try ARR0-ARR6 first  */
    }
    else
    {
	for (i = 0; i < 7; i++)
	{
	    cyrix_get_arr (i, &lbase, &lsize, &ltype);
	    if ((i == 3) && arr3_protected) continue;
	    if (lsize < 1) return i;
	}
	/*  ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K  */
	cyrix_get_arr (i, &lbase, &lsize, &ltype);
	if ((lsize < 1) && (size >= 0x40000)) return i;
    }
    return -ENOSPC;
}   /*  End Function cyrix_get_free_region  */
static int (*get_free_region) (unsigned long base,
			       unsigned long size) = generic_get_free_region;

int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
	      char increment)
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
    [NOTE] This routine serialises on a semaphore.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6)
	{   /*  pre-Athlon CPUs  */
	    /*  Apply the K6 block alignment and size rules
		In order
		o Uncached or gathering only
		o 128K or bigger block
		o Power of 2 block
		o base suitably aligned to the power
	     */
	    if ( type > MTRR_TYPE_WRCOMB || size < (1 << 17) ||
		 (size & ~(size-1))-size || ( base & (size-1) ) )
		return -EINVAL;
	    break;
	}
	/*  Else fall through  */
      case X86_VENDOR_INTEL:
	/*  Double check for Intel, we may run on Athlon  */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
	{
	    /*  For Intel PPro stepping <= 7, must be 4 MiB aligned  */
	    if ( (boot_cpu_data.x86 == 6) && (boot_cpu_data.x86_model == 1) &&
		 (boot_cpu_data.x86_mask <= 7) && ( base & ( (1 << 22) - 1 ) ) )
	    {
		printk ("mtrr: base(0x%lx) is not 4 MiB aligned\n", base);
		return -EINVAL;
	    }
	}
	/*  Fall through  */
      case X86_VENDOR_CYRIX:
      case X86_VENDOR_CENTAUR:
	if ( (base & 0xfff) || (size & 0xfff) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: %lx  base: %lx\n", size, base);
	    return -EINVAL;
	}
	if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
	{
	    if (type != MTRR_TYPE_WRCOMB)
	    {
		printk ("mtrr: only write-combining is supported\n");
		return -EINVAL;
	    }
	}
	else if (base + size < 0x100000)
	{
	    printk ("mtrr: cannot set region below 1 MiB (0x%lx,0x%lx)\n",
		    base, size);
	    return -EINVAL;
	}
	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1);
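	/*  Example (illustrative values): base=0x4000000, size=0x400000
	    gives last=0x43fffff; after 22 shifts lbase == last == 0x10,
	    so the region is size-aligned and passes the check below  */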
	if (lbase != last)
	{
	    printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
		    base, size);
	    return -EINVAL;
	}
	break;
      default:
	return -EINVAL;
	/*break;*/
    }
    if (type >= MTRR_NUM_TYPES)
    {
	printk ("mtrr: type: %u illegal\n", type);
	return -EINVAL;
    }
    /*  If the type is WC, check that this processor supports it  */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
	printk ("mtrr: your processor doesn't support write-combining\n");
	return -ENOSYS;
    }
    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /*  Search for existing MTRR  */
    down (&main_lock);
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (base >= lbase + lsize) continue;
	if ( (base < lbase) && (base + size <= lbase) ) continue;
	/*  At this point we know there is some kind of overlap/enclosure  */
	if ( (base < lbase) || (base + size > lbase + lsize) )
	{
	    up (&main_lock);
	    printk ("mtrr: 0x%lx,0x%lx overlaps existing 0x%lx,0x%lx\n",
		    base, size, lbase, lsize);
	    return -EINVAL;
	}
	/*  New region is enclosed by an existing region  */
	if (ltype != type)
	{
	    if (type == MTRR_TYPE_UNCACHABLE) continue;
	    up (&main_lock);
	    printk ( "mtrr: type mismatch for %lx,%lx old: %s new: %s\n",
		     base, size, attrib_to_str (ltype), attrib_to_str (type) );
	    return -EINVAL;
	}
	if (increment) ++usage_table[i];
	compute_ascii ();
	up (&main_lock);
	return i;
    }
    /*  Search for an empty MTRR  */
    i = (*get_free_region) (base, size);
    if (i < 0)
    {
	up (&main_lock);
	printk ("mtrr: no more MTRRs available\n");
	return i;
    }
    set_mtrr (i, base, size, type);
    usage_table[i] = 1;
    compute_ascii ();
    up (&main_lock);
    return i;
}   /*  End Function mtrr_add  */

int mtrr_del (int reg, unsigned long base, unsigned long size)
/*  [SUMMARY] Delete MTRR/decrement usage count.
    <reg> The register. If this is less than 0 then <<base>> and <<size>> must
    be supplied.
    <base> The base address of the region. This is ignored if <<reg>> is >= 0.
    <size> The size of the region. This is ignored if <<reg>> is >= 0.
    [RETURNS] The register on success, else a negative number indicating
    the error code.
    [NOTE] This routine serialises on a semaphore.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
    max = get_num_var_ranges ();
    down (&main_lock);
    if (reg < 0)
    {
	/*  Search for existing MTRR  */
	for (i = 0; i < max; ++i)
	{
	    (*get_mtrr) (i, &lbase, &lsize, &ltype);
	    if ( (lbase == base) && (lsize == size) )
	    {
		reg = i;
		break;
	    }
	}
	if (reg < 0)
	{
	    up (&main_lock);
	    printk ("mtrr: no MTRR for %lx,%lx found\n", base, size);
	    return -EINVAL;
	}
    }
    if (reg >= max)
    {
	up (&main_lock);
	printk ("mtrr: register: %d too big\n", reg);
	return -EINVAL;
    }
    if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX)
    {
	if ( (reg == 3) && arr3_protected )
	{
	    up (&main_lock);
	    printk ("mtrr: ARR3 cannot be changed\n");
	    return -EINVAL;
	}
    }
    (*get_mtrr) (reg, &lbase, &lsize, &ltype);
    if (lsize < 1)
    {
	up (&main_lock);
	printk ("mtrr: MTRR %d not used\n", reg);
	return -EINVAL;
    }
    if (usage_table[reg] < 1)
    {
	up (&main_lock);
	printk ("mtrr: reg: %d has count=0\n", reg);
	return -EINVAL;
    }
    if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
    compute_ascii ();
    up (&main_lock);
    return reg;
}   /*  End Function mtrr_del  */

#ifdef USERSPACE_INTERFACE

static int mtrr_file_add (unsigned long base, unsigned long size,
			  unsigned int type, char increment, struct file *file)
{
    int reg, max;
    unsigned int *fcount = file->private_data;

    max = get_num_var_ranges ();
    if (fcount == NULL)
    {
	if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
	{
	    printk ("mtrr: could not allocate\n");
	    return -ENOMEM;
	}
	memset (fcount, 0, max * sizeof *fcount);
	file->private_data = fcount;
    }
    reg = mtrr_add (base, size, type, 1);
    if (reg >= 0) ++fcount[reg];
    return reg;
}   /*  End Function mtrr_file_add  */

static int mtrr_file_del (unsigned long base, unsigned long size,
			  struct file *file)
{
    int reg;
    unsigned int *fcount = file->private_data;

    reg = mtrr_del (-1, base, size);
    if (reg < 0) return reg;
    if (fcount == NULL) return reg;
    if (fcount[reg] < 1) return -EINVAL;
    --fcount[reg];
    return reg;
}   /*  End Function mtrr_file_del  */

static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
			  loff_t *ppos)
{
    if (*ppos >= ascii_buf_bytes) return 0;
    if (*ppos + len > ascii_buf_bytes) len = ascii_buf_bytes - *ppos;
    if ( copy_to_user (buf, ascii_buffer + *ppos, len) ) return -EFAULT;
    *ppos += len;
    return len;
}   /*  End Function mtrr_read  */

static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
			   loff_t *ppos)
/*  Format of control line:
    "base=%lx size=%lx type=%s"     OR:
    "disable=%d"
*/
{
    int i, err;
    unsigned long reg, base, size;
    char *ptr;
    char line[LINE_SIZE];

    if ( !suser () ) return -EPERM;
    /*  Can't seek (pwrite) on this device  */
    if (ppos != &file->f_pos) return -ESPIPE;
    memset (line, 0, LINE_SIZE);
    if (len > LINE_SIZE) len = LINE_SIZE;
    if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
    ptr = line + strlen (line) - 1;
    if (*ptr == '\n') *ptr = '\0';
    if ( !strncmp (line, "disable=", 8) )
    {
	reg = simple_strtoul (line + 8, &ptr, 0);
	err = mtrr_del (reg, 0, 0);
	if (err < 0) return err;
	return len;
    }
    if ( strncmp (line, "base=", 5) )
    {
	printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    base = simple_strtoul (line + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "size=", 5) )
    {
	printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    size = simple_strtoul (ptr + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "type=", 5) )
    {
	printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    ptr += 5;
    for (; isspace (*ptr); ++ptr);
    for (i = 0; i < MTRR_NUM_TYPES; ++i)
    {
	if ( strcmp (ptr, mtrr_strings[i]) ) continue;
	err = mtrr_add (base, size, i, 1);
	if (err < 0) return err;
	return len;
    }
    printk ("mtrr: illegal type: \"%s\"\n", ptr);
    return -EINVAL;
}   /*  End Function mtrr_write  */

static int mtrr_ioctl (struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
    int err;
    mtrr_type type;
    struct mtrr_sentry sentry;
    struct mtrr_gentry gentry;

    switch (cmd)
    {
      default:
	return -ENOIOCTLCMD;
      case MTRRIOC_ADD_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
	gentry.type = type;
	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
    }
    return 0;
}   /*  End Function mtrr_ioctl  */

static int mtrr_close (struct inode *ino, struct file *file)
{
    int i, max;
    unsigned int *fcount = file->private_data;

    MOD_DEC_USE_COUNT;
    if (fcount == NULL) return 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	while (fcount[i] > 0)
	{
	    if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
	    --fcount[i];
	}
    }
    kfree (fcount);
    file->private_data = NULL;
    return 0;
}   /*  End Function mtrr_close  */

static struct file_operations mtrr_fops =
{
    read:    mtrr_read,
    write:   mtrr_write,
    ioctl:   mtrr_ioctl,
    release: mtrr_close,
};

# ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_root_mtrr;

# endif  /*  CONFIG_PROC_FS  */

static devfs_handle_t devfs_handle = NULL;

static void compute_ascii (void)
{
    char factor;
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
	(*get_mtrr) (i, &base, &size, &type);
	if (size < 1) usage_table[i] = 0;
	else
	{
	    if (size < 0x100000)
	    {
		/*  1MB  */
		factor = 'k';
		size >>= 10;
	    }
	    else
	    {
		factor = 'M';
		size >>= 20;
	    }
	    sprintf
		(ascii_buffer + ascii_buf_bytes,
		 "reg%02i: base=0x%08lx (%4liMB), size=%4li%cB: %s, count=%d\n",
		 i, base, base>>20, size, factor,
		 attrib_to_str (type), usage_table[i]);
	    ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
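	    /*  A /proc/mtrr line as produced by the sprintf above looks
		like (illustrative values):
		reg00: base=0x00000000 (   0MB), size=  64MB: write-back, count=1  */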
	}
    }
    devfs_set_file_size (devfs_handle, ascii_buf_bytes);
# ifdef CONFIG_PROC_FS
    proc_root_mtrr->size = ascii_buf_bytes;
# endif  /*  CONFIG_PROC_FS  */
}   /*  End Function compute_ascii  */

#endif  /*  USERSPACE_INTERFACE  */

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

#ifdef __SMP__

typedef struct
{
    unsigned long base;
    unsigned long size;
    mtrr_type type;
} arr_state_t;

arr_state_t arr_state[8] __initdata =
{
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
};

unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };

static void __init cyrix_arr_init_secondary(void)
{
    struct set_mtrr_context ctxt;
    int i;

    set_mtrr_prepare (&ctxt);  /*  flush cache and enable MAPEN  */

    /*  the CCRs are not contiguous  */
    for (i = 0; i < 4; i++) setCx86 (CX86_CCR0 + i, ccr_state[i]);
    for (     ; i < 7; i++) setCx86 (CX86_CCR4 + i, ccr_state[i]);
    for (i = 0; i < 8; i++)
	cyrix_set_arr_up (i,
	    arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt);  /*  flush cache and disable MAPEN  */
}   /*  End Function cyrix_arr_init_secondary  */

#endif

/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init cyrix_arr_init(void)
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef __SMP__
    int i;
#endif

    set_mtrr_prepare (&ctxt);  /*  flush cache and enable MAPEN  */

    /*  Save all CCRs locally  */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
    {
	ccrc[3] = 1;
	arr3_protected = 1;
    }
    else
    {
	/*  Disable SMM mode (bit 1), access to SMM memory (bit 2) and
	 *  access to SMM memory through ARR3 (bit 7).
	 */
	if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
	if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
	if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
	arr3_protected = 0;
	if (ccr[6] & 0x02) {
	    ccr[6] &= 0xfd; ccrc[6] = 1;  /*  Disable write protection of ARR3  */
	    setCx86 (CX86_CCR6, ccr[6]);
	}
	/*  Disable ARR3. This is safe now that we disabled SMM.  */
	/*  cyrix_set_arr_up (3, 0, 0, 0, FALSE);  */
    }
    /*  If we changed CCR1 in memory, change it in the processor, too.  */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /*  Enable ARR usage by the processor  */
    if (!(ccr[5] & 0x20))
    {
	ccr[5] |= 0x20; ccrc[5] = 1;
	setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef __SMP__
    for (i = 0; i < 7; i++) ccr_state[i] = ccr[i];
    for (i = 0; i < 8; i++)
	cyrix_get_arr (i,
	    &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt);  /*  flush cache and disable MAPEN  */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
}   /*  End Function cyrix_arr_init  */

static void __init centaur_mcr_init(void)
{
    unsigned i;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /*  Unfortunately, MCR's are read-only, so there is no way to
     *  find out what the bios might have done.
     */
    /*  Clear all MCR's.
     *  This way we are sure that the centaur_mcr array contains the actual
     *  values. The disadvantage is that any BIOS tweaks are thus undone.
     */
    for (i = 0; i < 8; ++i)
    {
	centaur_mcr[i].high = 0;
	centaur_mcr[i].low = 0;
	wrmsr (0x110 + i, 0, 0);
    }
    /*  Throw the main write-combining switch...  */
    wrmsr (0x120, 0x01f0001f, 0);
    set_mtrr_done (&ctxt);
}   /*  End Function centaur_mcr_init  */

static void __init mtrr_setup(void)
{
    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6)
	{
	    /*  pre-Athlon CPUs  */
	    get_mtrr = amd_get_mtrr;
	    set_mtrr_up = amd_set_mtrr_up;
	    break;
	}
	/*  Else fall through  */
      case X86_VENDOR_INTEL:
	get_mtrr = intel_get_mtrr;
	set_mtrr_up = intel_set_mtrr_up;
	break;
      case X86_VENDOR_CYRIX:
	get_mtrr = cyrix_get_arr;
	set_mtrr_up = cyrix_set_arr_up;
	get_free_region = cyrix_get_free_region;
	break;
      case X86_VENDOR_CENTAUR:
	get_mtrr = centaur_get_mcr;
	set_mtrr_up = centaur_set_mcr_up;
	break;
    }
}   /*  End Function mtrr_setup  */

#ifdef __SMP__

static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};

void __init mtrr_init_boot_cpu(void)
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
    mtrr_setup ();
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6) break;  /*  Pre-Athlon CPUs  */
      case X86_VENDOR_INTEL:
	get_mtrr_state (&smp_mtrr_state);
	break;
      case X86_VENDOR_CYRIX:
	cyrix_arr_init ();
	break;
      case X86_VENDOR_CENTAUR:
	centaur_mcr_init ();
	break;
    }
}   /*  End Function mtrr_init_boot_cpu  */

static void __init intel_mtrr_init_secondary_cpu(void)
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /*  Note that this is not ideal, since the cache is only flushed/disabled
	for this CPU while the MTRRs are changed, but changing this requires
	more invasive changes to the way the kernel boots  */
    set_mtrr_prepare (&ctxt);
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);
    /*  Use the atomic bitops to update the global mask  */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
	if (mask & 0x01) set_bit (count, &smp_changes_mask);
	mask >>= 1;
    }
}   /*  End Function intel_mtrr_init_secondary_cpu  */

void __init mtrr_init_secondary_cpu(void)
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	/*  Just for robustness: pre-Athlon CPUs cannot do SMP  */
	if (boot_cpu_data.x86 < 6) break;
      case X86_VENDOR_INTEL:
	intel_mtrr_init_secondary_cpu ();
	break;
      case X86_VENDOR_CYRIX:
	/*  This is _completely theoretical_!
	 *  I assume here that one day Cyrix will support Intel APIC.
	 *  In reality on non-Intel CPUs we won't even get to this routine.
	 *  Hopefully no one will plug two Cyrix processors in a dual P5 board.
	 *  :-)
	 */
	cyrix_arr_init_secondary ();
	break;
      default:
	printk ("mtrr: SMP support incomplete for this vendor\n");
	break;
    }
}   /*  End Function mtrr_init_secondary_cpu  */
#endif  /*  __SMP__  */

int __init mtrr_init(void)
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0;
#ifdef __SMP__
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
	if (boot_cpu_data.x86 < 6) break;  /*  Pre-Athlon CPUs  */
      case X86_VENDOR_INTEL:
	finalize_mtrr_state (&smp_mtrr_state);
	mtrr_state_warn (smp_changes_mask);
	break;
    }
#else  /*  __SMP__  */
    mtrr_setup ();
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_CYRIX:
	cyrix_arr_init ();
	break;
      case X86_VENDOR_CENTAUR:
	centaur_mcr_init ();
	break;
    }
#endif  /*  !__SMP__  */

#ifdef CONFIG_PROC_FS
    proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
    proc_root_mtrr->proc_fops = &mtrr_fops;
#endif
#ifdef CONFIG_DEVFS_FS
    devfs_handle = devfs_register (NULL, "cpu/mtrr", 0, DEVFS_FL_DEFAULT, 0, 0,
				   S_IFREG | S_IRUGO | S_IWUSR, 0, 0,
				   &mtrr_fops, NULL);
#endif
    init_table ();
    return 0;
}   /*  End Function mtrr_init  */