/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-1999  Richard Gooch

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    ChangeLog

    Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
               Initial register-setting code (from proform-1.0).
    19971216   Richard Gooch <rgooch@atnf.csiro.au>
               Original version for /proc/mtrr interface, SMP-safe.
  v1.0
    19971217   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for ioctls()'s.
               Added sample code in Documentation/mtrr.txt
  v1.1
    19971218   Richard Gooch <rgooch@atnf.csiro.au>
               Disallow overlapping regions.
    19971219   Jens Maurer <jmaurer@menuett.rhein-main.de>
               Register-setting fixups.
  v1.2
    19971222   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.75.
  v1.3
    19971229   David Wragg <dpw@doc.ic.ac.uk>
               Register-setting fixups and conformity with Intel conventions.
    19971229   Richard Gooch <rgooch@atnf.csiro.au>
               Cosmetic changes and wrote this ChangeLog ;-)
    19980106   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.78.
  v1.4
    19980119   David Wragg <dpw@doc.ic.ac.uk>
               Included passive-release enable code (elsewhere in PCI setup).
  v1.5
    19980131   Richard Gooch <rgooch@atnf.csiro.au>
               Replaced global kernel lock with private spinlock.
  v1.6
    19980201   Richard Gooch <rgooch@atnf.csiro.au>
               Added wait for other CPUs to complete changes.
  v1.7
    19980202   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix in definition of <set_mtrr> for UP.
  v1.8
    19980319   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.90.
    19980323   Richard Gooch <rgooch@atnf.csiro.au>
               Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
  v1.9
    19980325   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed test for overlapping regions: confused by adjacent regions
    19980326   Richard Gooch <rgooch@atnf.csiro.au>
               Added wbinvd in <set_mtrr_prepare>.
    19980401   Richard Gooch <rgooch@atnf.csiro.au>
               Bug fix for non-SMP compilation.
    19980418   David Wragg <dpw@doc.ic.ac.uk>
               Fixed-MTRR synchronisation for SMP and use atomic operations
               instead of spinlocks.
    19980418   Richard Gooch <rgooch@atnf.csiro.au>
               Differentiate different MTRR register classes for BIOS fixup.
  v1.10
    19980419   David Wragg <dpw@doc.ic.ac.uk>
               Bug fix in variable MTRR synchronisation.
  v1.11
    19980419   Richard Gooch <rgooch@atnf.csiro.au>
               Fixups for kernel 2.1.97.
  v1.12
    19980421   Richard Gooch <rgooch@atnf.csiro.au>
               Safer synchronisation across CPUs when changing MTRRs.
  v1.13
    19980423   Richard Gooch <rgooch@atnf.csiro.au>
               Bugfix for SMP systems without MTRR support.
  v1.14
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
               Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
  v1.15
    19980427   Richard Gooch <rgooch@atnf.csiro.au>
               Use atomic bitops for setting SMP change mask.
  v1.16
    19980428   Richard Gooch <rgooch@atnf.csiro.au>
               Removed spurious diagnostic message.
  v1.17
    19980429   Richard Gooch <rgooch@atnf.csiro.au>
               Moved register-setting macros into this file.
               Moved setup code from init/main.c to i386-specific areas.
  v1.18
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
               Moved MTRR detection outside conditionals in <mtrr_init>.
  v1.19
    19980502   Richard Gooch <rgooch@atnf.csiro.au>
               Documentation improvement: mention Pentium II and AGP.
  v1.20
    19980521   Richard Gooch <rgooch@atnf.csiro.au>
               Only manipulate interrupt enable flag on local CPU.
               Allow enclosed uncachable regions.
  v1.21
    19980611   Richard Gooch <rgooch@atnf.csiro.au>
               Always define <main_lock>.
  v1.22
    19980901   Richard Gooch <rgooch@atnf.csiro.au>
               Removed module support in order to tidy up code.
               Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
               Created addition queue for prior to SMP commence.
  v1.23
    19980902   Richard Gooch <rgooch@atnf.csiro.au>
               Ported patch to kernel 2.1.120-pre3.
  v1.24
    19980910   Richard Gooch <rgooch@atnf.csiro.au>
               Removed sanity checks and addition queue: Linus prefers an OOPS.
  v1.25
    19981001   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed harmless compiler warning in include/asm-i386/mtrr.h
               Fixed version numbering and history for v1.23 -> v1.24.
  v1.26
    19990118   Richard Gooch <rgooch@atnf.csiro.au>
               PLACEHOLDER.
  v1.27
    19990123   Richard Gooch <rgooch@atnf.csiro.au>
               Changed locking to spin with reschedule.
               Made use of new <smp_call_function>.
  v1.28
    19990201   Zoltan Boszormenyi <zboszor@mol.hu>
               Extended the driver to be able to use Cyrix style ARRs.
    19990204   Richard Gooch <rgooch@atnf.csiro.au>
               Restructured Cyrix support.
  v1.29
    19990204   Zoltan Boszormenyi <zboszor@mol.hu>
               Refined ARR support: enable MAPEN in set_mtrr_prepare()
               and disable MAPEN in set_mtrr_done().
    19990205   Richard Gooch <rgooch@atnf.csiro.au>
               Minor cleanups.
  v1.30
    19990208   Zoltan Boszormenyi <zboszor@mol.hu>
               Protect plain 6x86s (and other processors without the
               Page Global Enable feature) against accessing CR4 in
               set_mtrr_prepare() and set_mtrr_done().
    19990210   Richard Gooch <rgooch@atnf.csiro.au>
               Turned <set_mtrr_up> and <get_mtrr> into function pointers.
  v1.31
    19990212   Zoltan Boszormenyi <zboszor@mol.hu>
               Major rewrite of cyrix_arr_init(): do not touch ARRs,
               leave them as the BIOS has set them up.
               Enable usage of all 8 ARRs.
               Avoid multiplications by 3 everywhere and other
               code clean ups/speed ups.
    19990213   Zoltan Boszormenyi <zboszor@mol.hu>
               Set up other Cyrix processors identical to the boot cpu.
               Since Cyrix don't support Intel APIC, this is l'art pour l'art.
               Weigh ARRs by size:
               If size <= 32M is given, set up ARR# we were given.
               If size > 32M is given, set up ARR7 only if it is free,
               fail otherwise.
    19990214   Zoltan Boszormenyi <zboszor@mol.hu>
               Also check for size >= 256K if we are to set up ARR7,
               mtrr_add() returns the value it gets from set_mtrr()
    19990218   Zoltan Boszormenyi <zboszor@mol.hu>
               Remove Cyrix "coma bug" workaround from here.
               Moved to linux/arch/i386/kernel/setup.c and
               linux/include/asm-i386/bugs.h
    19990228   Richard Gooch <rgooch@atnf.csiro.au>
               Added #ifdef CONFIG_DEVFS_FS
               Added MTRRIOC_KILL_ENTRY ioctl(2)
               Trap for counter underflow in <mtrr_file_del>.
               Trap for 4 MiB aligned regions for PPro, stepping <= 7.
    19990301   Richard Gooch <rgooch@atnf.csiro.au>
               Created <get_free_region> hook.
    19990305   Richard Gooch <rgooch@atnf.csiro.au>
               Temporarily disable AMD support now MTRR capability flag is set.
  v1.32
    19990308   Zoltan Boszormenyi <zboszor@mol.hu>
               Adjust my changes (19990212-19990218) to Richard Gooch's
               latest changes. (19990228-19990305)
  v1.33
    19990309   Richard Gooch <rgooch@atnf.csiro.au>
               Fixed typo in <printk> message.
    19990310   Richard Gooch <rgooch@atnf.csiro.au>
               Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
  v1.34
    19990511   Bart Hartgers <bart@etpmod.phys.tue.nl>
               Support Centaur C6 MCR's.
    19990512   Richard Gooch <rgooch@atnf.csiro.au>
               Minor cleanups.
  v1.35
*/

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#define MTRR_NEED_STRINGS
#include <asm/mtrr.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/msr.h>

#include <asm/hardirq.h>
#include "irq.h"

#define MTRR_VERSION "1.35 (19990512)"

#define TRUE  1
#define FALSE 0

#define MTRRcap_MSR     0x0fe
#define MTRRdefType_MSR 0x2ff

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR  0x268
#define MTRRfix4K_C8000_MSR  0x269
#define MTRRfix4K_D0000_MSR  0x26a
#define MTRRfix4K_D8000_MSR  0x26b
#define MTRRfix4K_E0000_MSR  0x26c
#define MTRRfix4K_E8000_MSR  0x26d
#define MTRRfix4K_F0000_MSR  0x26e
#define MTRRfix4K_F8000_MSR  0x26f
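
/*  Each fixed-range MSR packs eight one-byte range types: the 64K MSR
    covers 0x00000-0x7ffff in eight 64K pieces, the two 16K MSRs cover
    0x80000-0xbffff and the eight 4K MSRs cover 0xc0000-0xfffff, giving
    11 * 8 == NUM_FIXED_RANGES (88) ranges in all.  */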

#ifdef __SMP__
#  define MTRR_CHANGE_MASK_FIXED     0x01
#  define MTRR_CHANGE_MASK_VARIABLE  0x02
#  define MTRR_CHANGE_MASK_DEFTYPE   0x04
#endif

/*  In the Intel processor's MTRR interface, the MTRR type is always held in
    an 8 bit field:  */
typedef u8 mtrr_type;

#define LINE_SIZE      80
#define JIFFIE_TIMEOUT 100

#ifdef __SMP__
#  define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
#  define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
                                                       TRUE)
#endif

#define spin_lock_reschedule(lock) while (!spin_trylock(lock)) schedule ();

#ifndef CONFIG_PROC_FS
#  define compute_ascii() while (0)
#endif

#ifdef CONFIG_PROC_FS
static char *ascii_buffer = NULL;
static unsigned int ascii_buf_bytes = 0;
#endif
static unsigned int *usage_table = NULL;
static spinlock_t main_lock = SPIN_LOCK_UNLOCKED;

/*  Private functions  */
#ifdef CONFIG_PROC_FS
static void compute_ascii (void);
#endif

struct set_mtrr_context
{
    unsigned long flags;
    unsigned long deftype_lo;
    unsigned long deftype_hi;
    unsigned long cr4val;
    unsigned long ccr3;
};

/*  Put the processor into a state where MTRRs can be safely set  */
static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    /*  Disable interrupts locally  */
    __save_flags (ctxt->flags); __cli ();

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
      case X86_VENDOR_CENTAUR:
        return;
        /*break;*/
    }
    /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
        asm volatile ("movl  %%cr4, %0\n\t"
                      "movl  %0, %1\n\t"
                      "andb  $0x7f, %b1\n\t"
                      "movl  %1, %%cr4\n\t"
                      : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");

    /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
        a side-effect  */
    asm volatile ("movl  %%cr0, %0\n\t"
                  "orl   $0x40000000, %0\n\t"
                  "wbinvd\n\t"
                  "movl  %0, %%cr0\n\t"
                  "wbinvd\n\t"
                  : "=r" (tmp) : : "memory");

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        /*  Disable MTRRs, and set the default type to uncached  */
        rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
        break;
      case X86_VENDOR_CYRIX:
        tmp = getCx86 (CX86_CCR3);
        setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10);
        ctxt->ccr3 = tmp;
        break;
    }
}   /*  End Function set_mtrr_prepare  */

/*  Restore the processor after a set_mtrr_prepare  */
static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_AMD:
      case X86_VENDOR_CENTAUR:
        __restore_flags (ctxt->flags);
        return;
        /*break;*/
    }
    /*  Flush caches and TLBs  */
    asm volatile ("wbinvd" : : : "memory" );

    /*  Restore MTRRdefType  */
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        break;
      case X86_VENDOR_CYRIX:
        setCx86 (CX86_CCR3, ctxt->ccr3);
        break;
    }

    /*  Enable caches  */
    asm volatile ("movl  %%cr0, %0\n\t"
                  "andl  $0xbfffffff, %0\n\t"
                  "movl  %0, %%cr0\n\t"
                  : "=r" (tmp) : : "memory");

    /*  Restore value of CR4  */
    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
        asm volatile ("movl  %0, %%cr4"
                      : : "r" (ctxt->cr4val) : "memory");

    /*  Re-enable interrupts locally (if enabled previously)  */
    __restore_flags (ctxt->flags);
}   /*  End Function set_mtrr_done  */

/*  This function returns the number of variable MTRRs  */
static unsigned int get_num_var_ranges (void)
{
    unsigned long config, dummy;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        rdmsr (MTRRcap_MSR, config, dummy);
        return (config & 0xff);
        /*break;*/
      case X86_VENDOR_CYRIX:
        /*  Cyrix have 8 ARRs  */
      case X86_VENDOR_CENTAUR:
        /*  and Centaur has 8 MCR's  */
        return 8;
        /*break;*/
      case X86_VENDOR_AMD:
        return 2;
        /*break;*/
    }
    return 0;
}   /*  End Function get_num_var_ranges  */

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb (void)
{
    unsigned long config, dummy;

    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        rdmsr (MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
        /*break;*/
      case X86_VENDOR_CYRIX:
      case X86_VENDOR_AMD:
      case X86_VENDOR_CENTAUR:
        return 1;
        /*break;*/
    }
    return 0;
}   /*  End Function have_wrcomb  */

static void intel_get_mtrr (unsigned int reg, unsigned long *base,
                            unsigned long *size, mtrr_type *type)
{
    unsigned long dummy, mask_lo, base_lo;

    rdmsr (MTRRphysMask_MSR (reg), mask_lo, dummy);
    if ((mask_lo & 0x800) == 0) {
        /*  Invalid (i.e. free) range  */
        *base = 0;
        *size = 0;
        *type = 0;
        return;
    }

    rdmsr (MTRRphysBase_MSR (reg), base_lo, dummy);

    /*  We ignore the extra address bits (32-35). If someone wants to
        run x86 Linux on a machine with >4GB memory, this will be the
        least of their problems.  */

    /*  Clean up mask_lo so it gives the real address mask.  */
    mask_lo = (mask_lo & 0xfffff000UL);
    /*  This works correctly if size is a power of two, i.e. a
        contiguous range.  */
    *size = ~(mask_lo - 1);

    *base = (base_lo & 0xfffff000UL);
    *type = (base_lo & 0xff);
}   /*  End Function intel_get_mtrr  */
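
/*  Worked example of the size decode above (illustrative numbers): a
    1 MiB region is advertised with mask_lo == 0xfff00000, so
    mask_lo - 1 == 0xffefffff and ~(mask_lo - 1) == 0x00100000 (1 MiB).  */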

static void cyrix_get_arr (unsigned int reg, unsigned long *base,
                           unsigned long *size, mtrr_type *type)
{
    unsigned long flags;
    unsigned char arr, ccr3, rcr, shift;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /*  Save flags and disable interrupts  */
    __save_flags (flags); __cli ();

    ccr3 = getCx86 (CX86_CCR3);
    setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);  /* enable MAPEN */
    ((unsigned char *) base)[3] = getCx86 (arr);
    ((unsigned char *) base)[2] = getCx86 (arr + 1);
    ((unsigned char *) base)[1] = getCx86 (arr + 2);
    rcr = getCx86 (CX86_RCR_BASE + reg);
    setCx86 (CX86_CCR3, ccr3);                  /* disable MAPEN */

    /*  Enable interrupts if it was enabled previously  */
    __restore_flags (flags);
    shift = ((unsigned char *) base)[1] & 0x0f;
    *base &= 0xfffff000UL;

    /*  Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     *  Note: shift==0xf means 4G, this is unsupported.
     */
    if (shift)
        *size = (reg < 7 ? 0x800UL : 0x20000UL) << shift;
    else
        *size = 0;

    /*  Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6  */
    if (reg < 7) {
        switch (rcr) {
          case 1:  *type = MTRR_TYPE_UNCACHABLE; break;
          case 8:  *type = MTRR_TYPE_WRBACK;     break;
          case 9:  *type = MTRR_TYPE_WRCOMB;     break;
          case 24:
          default: *type = MTRR_TYPE_WRTHROUGH;  break;
        }
    } else {
        switch (rcr) {
          case 0:  *type = MTRR_TYPE_UNCACHABLE; break;
          case 8:  *type = MTRR_TYPE_WRCOMB;     break;
          case 9:  *type = MTRR_TYPE_WRBACK;     break;
          case 25:
          default: *type = MTRR_TYPE_WRTHROUGH;  break;
        }
    }
}   /*  End Function cyrix_get_arr  */
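
/*  Illustrative ARR size decode: for ARR0-ARR6 the size is
    0x800UL << shift, so shift == 9 means 0x800 << 9 == 0x100000 (1 MiB);
    for ARR7 the same shift gives 0x20000 << 9 == 0x4000000 (64 MiB).  */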

static void amd_get_mtrr (unsigned int reg, unsigned long *base,
                          unsigned long *size, mtrr_type *type)
{
    unsigned long low, high;

    rdmsr (0xC0000085, low, high);
    /*  Upper dword is region 1, lower is region 0  */
    if (reg == 1) low = high;
    /*  The base masks off on the right alignment  */
    *base = low & 0xFFFE0000;
    *type = 0;
    if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
    if (low & 2) *type = MTRR_TYPE_WRCOMB;
    if ( !(low & 3) )
    {
        *size = 0;
        return;
    }
    /*
     *  This needs a little explaining. The size is stored as an
     *  inverted mask of bits of 128K granularity, 15 bits long, offset
     *  2 bits into the register.
     *
     *  So to get a size we invert the mask and add 1 to the lowest
     *  mask bit (4, as the mask starts 2 bits in). This gives us a
     *  size we then shift to turn into 128K blocks.
     *
     *  eg          111 1111 1111 1100      is 512K
     *
     *  invert      000 0000 0000 0011
     *  +1          000 0000 0000 0100
     *  *128K       ...
     */
    low = (~low) & 0x1FFFC;
    *size = (low + 4) << 15;
    return;
}   /*  End Function amd_get_mtrr  */

static struct
{
    unsigned long high;
    unsigned long low;
} centaur_mcr[8];

static void centaur_get_mcr (unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
    *base = centaur_mcr[reg].high & 0xfffff000;
    *size = (~(centaur_mcr[reg].low & 0xfffff000)) + 1;
    *type = MTRR_TYPE_WRCOMB;  /*  If it is there, it is write-combining  */
}   /*  End Function centaur_get_mcr  */

static void (*get_mtrr) (unsigned int reg, unsigned long *base,
                         unsigned long *size, mtrr_type *type) = NULL;

static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
                               unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_context ctxt;

    if (do_safe) set_mtrr_prepare (&ctxt);
    if (size == 0)
    {
        /*  The invalid bit is kept in the mask, so we simply clear the
            relevant mask register to disable a range.  */
        wrmsr (MTRRphysMask_MSR (reg), 0, 0);
    }
    else
    {
        wrmsr (MTRRphysBase_MSR (reg), base | type, 0);
        wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0);
    }
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function intel_set_mtrr_up  */

static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
                              unsigned long size, mtrr_type type, int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned char arr, arr_type, arr_size;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /*  count down from 32M (ARR0-ARR6) or from 2G (ARR7)  */
    size >>= (reg < 7 ? 12 : 18);
    size &= 0x7fff;  /* make sure arr_size <= 14 */
    for (arr_size = 0; size; arr_size++, size >>= 1);

    if (reg < 7) {
        switch (type) {
          case MTRR_TYPE_UNCACHABLE: arr_type = 1;  break;
          case MTRR_TYPE_WRCOMB:     arr_type = 9;  break;
          case MTRR_TYPE_WRTHROUGH:  arr_type = 24; break;
          default:                   arr_type = 8;  break;
        }
    } else {
        switch (type) {
          case MTRR_TYPE_UNCACHABLE: arr_type = 0;  break;
          case MTRR_TYPE_WRCOMB:     arr_type = 8;  break;
          case MTRR_TYPE_WRTHROUGH:  arr_type = 25; break;
          default:                   arr_type = 9;  break;
        }
    }

    if (do_safe) set_mtrr_prepare (&ctxt);
    setCx86 (arr,     ((unsigned char *) &base)[3]);
    setCx86 (arr + 1, ((unsigned char *) &base)[2]);
    setCx86 (arr + 2, (((unsigned char *) &base)[1]) | arr_size);
    setCx86 (CX86_RCR_BASE + reg, arr_type);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function cyrix_set_arr_up  */
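
/*  Illustrative ARR size encode: a 1 MiB region on ARR0-ARR6 gives
    size >> 12 == 0x100, and the loop above leaves arr_size == 9;
    cyrix_get_arr() decodes that back as 0x800 << 9 == 1 MiB.  */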

static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    u32 low, high;
    struct set_mtrr_context ctxt;

    if (do_safe) set_mtrr_prepare (&ctxt);
    /*
     *  Low is MTRR0, High is MTRR1
     */
    rdmsr (0xC0000085, low, high);
    /*
     *  Blank to disable
     */
    if (size == 0)
        *(reg ? &high : &low) = 0;
    else
        /*  Set the register to the base (already shifted for us), the
            type (off by one) and an inverted bitmask of the size.
            The size is the only odd bit. We are fed, say, 512K:
            inverting that gives 111 1111 1111 1011, but if you
            subtract one first and then invert you get the desired
            111 1111 1111 1100 mask.  */
        *(reg ? &high : &low) = (((~(size-1)) >> 15) & 0x0001FFFC) | base | (type+1);
    /*
     *  The writeback rule is quite specific. See the manual. It is:
     *  disable local interrupts, write back the cache, set the MTRR.
     */
    __asm__ __volatile__ ("wbinvd" : : : "memory");
    wrmsr (0xC0000085, low, high);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function amd_set_mtrr_up  */

static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
                                unsigned long size, mtrr_type type,
                                int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned long low, high;

    if (do_safe) set_mtrr_prepare (&ctxt);
    if (size == 0)
    {
        /*  Disable  */
        high = low = 0;
    }
    else
    {
        high = base & 0xfffff000;  /* base works on 4K pages... */
        low = ((~(size-1)) & 0xfffff000);
        low |= 0x1f;               /* only support write-combining... */
    }
    centaur_mcr[reg].high = high;
    centaur_mcr[reg].low = low;
    wrmsr (0x110 + reg, low, high);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function centaur_set_mcr_up  */

static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
                            unsigned long size, mtrr_type type,
                            int do_safe) = NULL;

#ifdef __SMP__

struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};

/*  Get the MSR pair relating to a var range  */
__initfunc(static void get_mtrr_var_range (unsigned int index,
                                           struct mtrr_var_range *vr))
{
    rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
    rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
}   /*  End Function get_mtrr_var_range  */

/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
__initfunc(static int set_mtrr_var_range_testing (unsigned int index,
                                                  struct mtrr_var_range *vr))
{
    unsigned int lo, hi;
    int changed = FALSE;

    rdmsr (MTRRphysBase_MSR (index), lo, hi);
    if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
        || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
        wrmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
        changed = TRUE;
    }

    rdmsr (MTRRphysMask_MSR (index), lo, hi);

    if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
        || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
        wrmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
        changed = TRUE;
    }
    return changed;
}   /*  End Function set_mtrr_var_range_testing  */

__initfunc(static void get_fixed_ranges (mtrr_type *frs))
{
    unsigned long *p = (unsigned long *) frs;
    int i;

    rdmsr (MTRRfix64K_00000_MSR, p[0], p[1]);

    for (i = 0; i < 2; i++)
        rdmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
    for (i = 0; i < 8; i++)
        rdmsr (MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
}   /*  End Function get_fixed_ranges  */

__initfunc(static int set_fixed_ranges_testing (mtrr_type *frs))
{
    unsigned long *p = (unsigned long *) frs;
    int changed = FALSE;
    int i;
    unsigned long lo, hi;

    rdmsr (MTRRfix64K_00000_MSR, lo, hi);
    if (p[0] != lo || p[1] != hi) {
        wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
        changed = TRUE;
    }

    for (i = 0; i < 2; i++) {
        rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
        if (p[2 + i*2] != lo || p[3 + i*2] != hi) {
            wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
            changed = TRUE;
        }
    }

    for (i = 0; i < 8; i++) {
        rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
        if (p[6 + i*2] != lo || p[7 + i*2] != hi) {
            wrmsr (MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
            changed = TRUE;
        }
    }
    return changed;
}   /*  End Function set_fixed_ranges_testing  */

struct mtrr_state
{
    unsigned int num_var_ranges;
    struct mtrr_var_range *var_ranges;
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;
    mtrr_type def_type;
};

/*  Grab all of the MTRR state for this CPU into *state  */
__initfunc(static void get_mtrr_state (struct mtrr_state *state))
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges ();
    vrs = state->var_ranges
        = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL)
        nvrs = state->num_var_ranges = 0;

    for (i = 0; i < nvrs; i++)
        get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /*  End Function get_mtrr_state  */

/*  Free resources associated with a struct mtrr_state  */
__initfunc(static void finalize_mtrr_state (struct mtrr_state *state))
{
    if (state->var_ranges) kfree (state->var_ranges);
}   /*  End Function finalize_mtrr_state  */

__initfunc(static unsigned long set_mtrr_state (struct mtrr_state *state,
                                                struct set_mtrr_context *ctxt))
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
        if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
            change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing (state->fixed_ranges) )
        change_mask |= MTRR_CHANGE_MASK_FIXED;
    /*  Set_mtrr_restore restores the old value of MTRRdefType,
        so to set it we fiddle with the saved value  */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
         || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled )
    {
        ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
        change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /*  End Function set_mtrr_state  */

static atomic_t undone_count;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;

struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};

static void ipi_handler (void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /*  Notify master that I've flushed and disabled my cache  */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) barrier ();
    /*  The master has cleared me to execute  */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
                    data->smp_type, FALSE);
    /*  Notify master CPU that I've executed the function  */
    atomic_dec (&undone_count);
    /*  Wait for master to clear me to enable cache and return  */
    while (wait_barrier_cache_enable) barrier ();
    set_mtrr_done (&ctxt);
}   /*  End Function ipi_handler  */

static void set_mtrr_smp (unsigned int reg, unsigned long base,
                          unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /*  Flush and disable the local CPU's cache and start the ball rolling on
        other CPUs  */
    set_mtrr_prepare (&ctxt);
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
        panic ("mtrr: timed out waiting for other CPUs\n");
    /*  Wait for all other CPUs to flush and disable their caches  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Set up for completion wait and then release other CPUs to change MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /*  Now wait for other CPUs to complete the function  */
    while (atomic_read (&undone_count) > 0) barrier ();
    /*  Now all CPUs should have finished the function. Release the barrier to
        allow them to re-enable their caches and return from their interrupt,
        then enable the local cache and return  */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /*  End Function set_mtrr_smp  */
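
/*  The rendezvous above is a two-phase barrier: first every CPU flushes
    and disables its cache (undone_count drains to zero), then all CPUs
    write the new MTRR value while caching is off, and only after the
    second drain are caches re-enabled, so no CPU ever caches through a
    half-updated mapping.  */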

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
__initfunc(static void mtrr_state_warn (unsigned long mask))
{
    if (!mask) return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
        printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
        printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
        printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk ("mtrr: probably your BIOS does not set up all CPUs\n");
}   /*  End Function mtrr_state_warn  */

#endif  /*  __SMP__  */

static char *attrib_to_str (int x)
{
    return (x <= 6) ? mtrr_strings[x] : "?";
}   /*  End Function attrib_to_str  */

static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
         == NULL )
    {
        printk ("mtrr: could not allocate\n");
        return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef CONFIG_PROC_FS
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
        printk ("mtrr: could not allocate\n");
        return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
}   /*  End Function init_table  */

static int generic_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
        (*get_mtrr) (i, &lbase, &lsize, &ltype);
        if (lsize < 1) return i;
    }
    return -ENOSPC;
}   /*  End Function generic_get_free_region  */

static int cyrix_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /*  If we are to set up a region >32M then look at ARR7 immediately  */
    if (size > 0x2000000UL) {
        cyrix_get_arr (7, &lbase, &lsize, &ltype);
        if (lsize < 1) return 7;
        /*  else try ARR0-ARR6 first  */
    } else {
        for (i = 0; i < 7; i++)
        {
            cyrix_get_arr (i, &lbase, &lsize, &ltype);
            if (lsize < 1) return i;
        }
        /*  None of ARR0-ARR6 is free, try ARR7, but its size must be at least 256K  */
        cyrix_get_arr (i, &lbase, &lsize, &ltype);
        if ((lsize < 1) && (size >= 0x40000)) return i;
    }
    return -ENOSPC;
}   /*  End Function cyrix_get_free_region  */

static int (*get_free_region) (unsigned long base,
                               unsigned long size) = generic_get_free_region;

int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
              char increment)
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
    [NOTE] This routine uses a spinlock.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        /*  For Intel PPro stepping <= 7, must be 4 MiB aligned  */
        if ( (boot_cpu_data.x86 == 6) && (boot_cpu_data.x86_model == 1) &&
             (boot_cpu_data.x86_mask <= 7) && ( base & ( (1 << 22) - 1 ) ) )
        {
            printk ("mtrr: base(0x%lx) is not 4 MiB aligned\n", base);
            return -EINVAL;
        }
        /*  Fall through  */
      case X86_VENDOR_CYRIX:
      case X86_VENDOR_CENTAUR:
        if ( (base & 0xfff) || (size & 0xfff) )
        {
            printk ("mtrr: size and base must be multiples of 4 kiB\n");
            printk ("mtrr: size: %lx  base: %lx\n", size, base);
            return -EINVAL;
        }
        if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
        {
            if (type != MTRR_TYPE_WRCOMB)
            {
                printk ("mtrr: only write-combining is supported\n");
                return -EINVAL;
            }
        }
        else if (base + size < 0x100000)
        {
            printk ("mtrr: cannot set region below 1 MiB (0x%lx,0x%lx)\n",
                    base, size);
            return -EINVAL;
        }
        /*  Check upper bits of base and last are equal and lower bits are 0
            for base and 1 for last  */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1);
        if (lbase != last)
        {
            printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
                    base, size);
            return -EINVAL;
        }
        break;
      case X86_VENDOR_AMD:
        /*  Apply the K6 block alignment and size rules
            In order
            o Uncached or gathering only
            o 128K or bigger block
            o Power of 2 block
            o base suitably aligned to the power
        */
        if (type > MTRR_TYPE_WRCOMB || size < (1 << 17) ||
            (size & ~(size-1)) - size || (base & (size-1)))
            return -EINVAL;
        break;
      default:
        return -EINVAL;
        /*break;*/
    }
    if (type >= MTRR_NUM_TYPES)
    {
        printk ("mtrr: type: %u illegal\n", type);
        return -EINVAL;
    }
    /*  If the type is WC, check that this processor supports it  */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
        printk ("mtrr: your processor doesn't support write-combining\n");
        return -ENOSYS;
    }
    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /*  Search for existing MTRR  */
    spin_lock_reschedule (&main_lock);
    for (i = 0; i < max; ++i)
    {
        (*get_mtrr) (i, &lbase, &lsize, &ltype);
        if (base >= lbase + lsize) continue;
        if ( (base < lbase) && (base + size <= lbase) ) continue;
        /*  At this point we know there is some kind of overlap/enclosure  */
        if ( (base < lbase) || (base + size > lbase + lsize) )
        {
            spin_unlock (&main_lock);
            printk ("mtrr: 0x%lx,0x%lx overlaps existing 0x%lx,0x%lx\n",
                    base, size, lbase, lsize);
            return -EINVAL;
        }
        /*  New region is enclosed by an existing region  */
        if (ltype != type)
        {
            if (type == MTRR_TYPE_UNCACHABLE) continue;
            spin_unlock (&main_lock);
            printk ( "mtrr: type mismatch for %lx,%lx old: %s new: %s\n",
                     base, size, attrib_to_str (ltype), attrib_to_str (type) );
            return -EINVAL;
        }
        if (increment) ++usage_table[i];
        compute_ascii ();
        spin_unlock (&main_lock);
        return i;
    }
    /*  Search for an empty MTRR  */
    i = (*get_free_region) (base, size);
    if (i < 0)
    {
        spin_unlock (&main_lock);
        printk ("mtrr: no more MTRRs available\n");
        return i;
    }
    set_mtrr (i, base, size, type);
    usage_table[i] = 1;
    compute_ascii ();
    spin_unlock (&main_lock);
    return i;
}   /*  End Function mtrr_add  */

int mtrr_del (int reg, unsigned long base, unsigned long size)
/*  [SUMMARY] Delete MTRR/decrement usage count.
    <reg> The register. If this is less than 0 then <<base>> and <<size>> must
    be supplied.
    <base> The base address of the region. This is ignored if <<reg>> is >= 0.
    <size> The size of the region. This is ignored if <<reg>> is >= 0.
    [RETURNS] The register on success, else a negative number indicating
    the error code.
    [NOTE] This routine uses a spinlock.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
    max = get_num_var_ranges ();
    spin_lock_reschedule (&main_lock);
    if (reg < 0)
    {
        /*  Search for existing MTRR  */
        for (i = 0; i < max; ++i)
        {
            (*get_mtrr) (i, &lbase, &lsize, &ltype);
            if ( (lbase == base) && (lsize == size) )
            {
                reg = i;
                break;
            }
        }
        if (reg < 0)
        {
            spin_unlock (&main_lock);
            printk ("mtrr: no MTRR for %lx,%lx found\n", base, size);
            return -EINVAL;
        }
    }
    if (reg >= max)
    {
        spin_unlock (&main_lock);
        printk ("mtrr: register: %d too big\n", reg);
        return -EINVAL;
    }
    (*get_mtrr) (reg, &lbase, &lsize, &ltype);
    if (lsize < 1)
    {
        spin_unlock (&main_lock);
        printk ("mtrr: MTRR %d not used\n", reg);
        return -EINVAL;
    }
    if (usage_table[reg] < 1)
    {
        spin_unlock (&main_lock);
        printk ("mtrr: reg: %d has count=0\n", reg);
        return -EINVAL;
    }
    if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
    compute_ascii ();
    spin_unlock (&main_lock);
    return reg;
}   /*  End Function mtrr_del  */
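
/*  Typical driver usage (illustrative addresses; see
    Documentation/mtrr.txt for the full interface description):

        int reg = mtrr_add (0xf8000000UL, 0x400000UL, MTRR_TYPE_WRCOMB, 1);
        ...
        if (reg >= 0) mtrr_del (reg, 0xf8000000UL, 0x400000UL);
*/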

#ifdef CONFIG_PROC_FS

static int mtrr_file_add (unsigned long base, unsigned long size,
                          unsigned int type, char increment, struct file *file)
{
    int reg, max;
    unsigned int *fcount = file->private_data;

    max = get_num_var_ranges ();
    if (fcount == NULL)
    {
        if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
        {
            printk ("mtrr: could not allocate\n");
            return -ENOMEM;
        }
        memset (fcount, 0, max * sizeof *fcount);
        file->private_data = fcount;
    }
    reg = mtrr_add (base, size, type, 1);
    if (reg >= 0) ++fcount[reg];
    return reg;
}   /*  End Function mtrr_file_add  */

static int mtrr_file_del (unsigned long base, unsigned long size,
                          struct file *file)
{
    int reg;
    unsigned int *fcount = file->private_data;

    reg = mtrr_del (-1, base, size);
    if (reg < 0) return reg;
    if (fcount == NULL) return reg;
    if (fcount[reg] < 1) return -EINVAL;
    --fcount[reg];
    return reg;
}   /*  End Function mtrr_file_del  */

static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
                          loff_t *ppos)
{
    if (*ppos >= ascii_buf_bytes) return 0;
    if (*ppos + len > ascii_buf_bytes) len = ascii_buf_bytes - *ppos;
    if ( copy_to_user (buf, ascii_buffer + *ppos, len) ) return -EFAULT;
    *ppos += len;
    return len;
}   /*  End Function mtrr_read  */

static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
                           loff_t *ppos)
/*  Format of control line:
    "base=%lx size=%lx type=%s"     OR:
    "disable=%d"
*/
{
    int i, err;
    unsigned long reg, base, size;
    char *ptr;
    char line[LINE_SIZE];

    if ( !suser () ) return -EPERM;
    /*  Can't seek (pwrite) on this device  */
    if (ppos != &file->f_pos) return -ESPIPE;
    memset (line, 0, LINE_SIZE);
    if (len > LINE_SIZE) len = LINE_SIZE;
    if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
    ptr = line + strlen (line) - 1;
    if (*ptr == '\n') *ptr = '\0';
    if ( !strncmp (line, "disable=", 8) )
    {
        reg = simple_strtoul (line + 8, &ptr, 0);
        err = mtrr_del (reg, 0, 0);
        if (err < 0) return err;
        return len;
    }
    if ( strncmp (line, "base=", 5) )
    {
        printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
        return -EINVAL;
    }
    base = simple_strtoul (line + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "size=", 5) )
    {
        printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
        return -EINVAL;
    }
    size = simple_strtoul (ptr + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "type=", 5) )
    {
        printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
        return -EINVAL;
    }
    ptr += 5;
    for (; isspace (*ptr); ++ptr);
    for (i = 0; i < MTRR_NUM_TYPES; ++i)
    {
        if ( strcmp (ptr, mtrr_strings[i]) ) continue;
        err = mtrr_add (base, size, i, 1);
        if (err < 0) return err;
        return len;
    }
    printk ("mtrr: illegal type: \"%s\"\n", ptr);
    return -EINVAL;
}   /*  End Function mtrr_write  */
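
/*  Example of the control-line format parsed above, written from a
    shell (illustrative values; see Documentation/mtrr.txt):

        echo "base=0xf8000000 size=0x400000 type=write-combining" >/proc/mtrr
        echo "disable=2" >/proc/mtrr
*/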

static int mtrr_ioctl (struct inode *inode, struct file *file,
                       unsigned int cmd, unsigned long arg)
{
    int err;
    mtrr_type type;
    struct mtrr_sentry sentry;
    struct mtrr_gentry gentry;

    switch (cmd)
    {
      default:
        return -ENOIOCTLCMD;
      case MTRRIOC_ADD_ENTRY:
        if ( !suser () ) return -EPERM;
        if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
            return -EFAULT;
        err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file);
        if (err < 0) return err;
        break;
      case MTRRIOC_SET_ENTRY:
        if ( !suser () ) return -EPERM;
        if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
            return -EFAULT;
        err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
        if (err < 0) return err;
        break;
      case MTRRIOC_DEL_ENTRY:
        if ( !suser () ) return -EPERM;
        if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
            return -EFAULT;
        err = mtrr_file_del (sentry.base, sentry.size, file);
        if (err < 0) return err;
        break;
      case MTRRIOC_KILL_ENTRY:
        if ( !suser () ) return -EPERM;
        if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
            return -EFAULT;
        err = mtrr_del (-1, sentry.base, sentry.size);
        if (err < 0) return err;
        break;
      case MTRRIOC_GET_ENTRY:
        if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
            return -EFAULT;
        if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
        (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
        gentry.type = type;
        if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
            return -EFAULT;
        break;
    }
    return 0;
}   /*  End Function mtrr_ioctl  */

static int mtrr_open (struct inode *ino, struct file *filep)
{
    MOD_INC_USE_COUNT;
    return 0;
}   /*  End Function mtrr_open  */

static int mtrr_close (struct inode *ino, struct file *file)
{
    int i, max;
    unsigned int *fcount = file->private_data;

    MOD_DEC_USE_COUNT;
    if (fcount == NULL) return 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
        while (fcount[i] > 0)
        {
            if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
            --fcount[i];
        }
    }
    kfree (fcount);
    file->private_data = NULL;
    return 0;
}   /*  End Function mtrr_close  */

static struct file_operations mtrr_fops =
{
    NULL,        /*  Seek              */
    mtrr_read,   /*  Read              */
    mtrr_write,  /*  Write             */
    NULL,        /*  Readdir           */
    NULL,        /*  Poll              */
    mtrr_ioctl,  /*  IOctl             */
    NULL,        /*  MMAP              */
    mtrr_open,   /*  Open              */
    NULL,        /*  Flush             */
    mtrr_close,  /*  Release           */
    NULL,        /*  Fsync             */
    NULL,        /*  Fasync            */
    NULL,        /*  CheckMediaChange  */
    NULL,        /*  Revalidate        */
    NULL,        /*  Lock              */
};

static struct inode_operations proc_mtrr_inode_operations = {
    &mtrr_fops,  /*  default property file-ops  */
    NULL,        /*  create       */
    NULL,        /*  lookup       */
    NULL,        /*  link         */
    NULL,        /*  unlink       */
    NULL,        /*  symlink      */
    NULL,        /*  mkdir        */
    NULL,        /*  rmdir        */
    NULL,        /*  mknod        */
    NULL,        /*  rename       */
    NULL,        /*  readlink     */
    NULL,        /*  follow_link  */
    NULL,        /*  get_block    */
    NULL,        /*  readpage     */
    NULL,        /*  writepage    */
    NULL,        /*  flushpage    */
    NULL,        /*  truncate     */
    NULL,        /*  permission   */
    NULL,        /*  smap         */
    NULL         /*  revalidate   */
};

static struct proc_dir_entry proc_root_mtrr = {
    PROC_MTRR, 4, "mtrr",
    S_IFREG | S_IWUSR | S_IRUGO, 1, 0, 0,
    0, &proc_mtrr_inode_operations
};

static void compute_ascii (void)
{
    char factor;
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
        (*get_mtrr) (i, &base, &size, &type);
        if (size < 1) usage_table[i] = 0;
        else
        {
            if (size < 0x100000)
            {
                /*  less than 1 MiB: show the size in kiB  */
                factor = 'k';
                size >>= 10;
            }
            else
            {
                factor = 'M';
                size >>= 20;
            }
            sprintf
                (ascii_buffer + ascii_buf_bytes,
                 "reg%02i: base=0x%08lx (%4liMB), size=%4li%cB: %s, count=%d\n",
                 i, base, base >> 20, size, factor,
                 attrib_to_str (type), usage_table[i]);
            ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
        }
    }
    proc_root_mtrr.size = ascii_buf_bytes;
}   /*  End Function compute_ascii  */

#endif  /*  CONFIG_PROC_FS  */

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

#ifdef __SMP__

typedef struct {
    unsigned long base;
    unsigned long size;
    mtrr_type type;
} arr_state_t;

arr_state_t arr_state[8] __initdata = {
    {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
    {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };

__initfunc(static void cyrix_arr_init_secondary(void))
{
    struct set_mtrr_context ctxt;
    int i;

    set_mtrr_prepare (&ctxt);  /* flush cache and enable MAPEN */

    /*  the CCRs are not contiguous  */
    for (i = 0; i < 4; i++) setCx86 (CX86_CCR0 + i, ccr_state[i]);
    for (     ; i < 7; i++) setCx86 (CX86_CCR4 + i, ccr_state[i]);
    for (i = 0; i < 8; i++)
        cyrix_set_arr_up (i,
            arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt);     /* flush cache and disable MAPEN */
}   /*  End Function cyrix_arr_init_secondary  */

#endif

/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
 * with the SMM (System Management Mode) mode. So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
__initfunc(static void cyrix_arr_init(void))
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef __SMP__
    int i;
#endif

    set_mtrr_prepare (&ctxt);  /* flush cache and enable MAPEN */

    /*  Save all CCRs locally  */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
        ccrc[3] = 1;
    else {
        /*  Disable SMM mode (bit 1), access to SMM memory (bit 2) and
         *  access to SMM memory through ARR3 (bit 7).
         */
        if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
        if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
        if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }

        if (ccr[6] & 0x02) {
            ccr[6] &= 0xfd; ccrc[6] = 1;  /* Disable write protection of ARR3. */
            setCx86 (CX86_CCR6, ccr[6]);
        }
        /*  Disable ARR3.  */
        /*  cyrix_set_arr_up (3, 0, 0, 0, FALSE);  */
    }
    /*  If we changed CCR1 in memory, change it in the processor, too.  */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /*  Enable ARR usage by the processor  */
    if (!(ccr[5] & 0x20)) {
        ccr[5] |= 0x20; ccrc[5] = 1;
        setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef __SMP__
    for (i = 0; i < 7; i++) ccr_state[i] = ccr[i];
    for (i = 0; i < 8; i++)
        cyrix_get_arr (i,
            &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt);  /* flush cache and disable MAPEN */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");

    if ( ccrc[1] & 0x80 ) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04 ) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02 ) printk ("mtrr: SMM mode disabled\n");

    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
}   /*  End Function cyrix_arr_init  */

__initfunc(static void centaur_mcr_init (void))
{
    unsigned i;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /*  Unfortunately, MCR's are read-only, so there is no way to
     *  find out what the BIOS might have done.
     */
    /*  Clear all MCR's.
     *  This way we are sure that the centaur_mcr array contains the actual
     *  values. The disadvantage is that any BIOS tweaks are thus undone.
     */
    for (i = 0; i < 8; ++i)
    {
        centaur_mcr[i].high = 0;
        centaur_mcr[i].low = 0;
        wrmsr (0x110 + i, 0, 0);
    }
    /*  Throw the main write-combining switch...  */
    wrmsr (0x120, 0x01f0001f, 0);
    set_mtrr_done (&ctxt);
}   /*  End Function centaur_mcr_init  */

__initfunc(static void mtrr_setup (void))
{
    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        get_mtrr = intel_get_mtrr;
        set_mtrr_up = intel_set_mtrr_up;
        break;
      case X86_VENDOR_CYRIX:
        get_mtrr = cyrix_get_arr;
        set_mtrr_up = cyrix_set_arr_up;
        get_free_region = cyrix_get_free_region;
        break;
      case X86_VENDOR_AMD:
        get_mtrr = amd_get_mtrr;
        set_mtrr_up = amd_set_mtrr_up;
        break;
      case X86_VENDOR_CENTAUR:
        get_mtrr = centaur_get_mcr;
        set_mtrr_up = centaur_set_mcr_up;
        break;
    }
}   /*  End Function mtrr_setup  */

#ifdef __SMP__

static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};

__initfunc(void mtrr_init_boot_cpu (void))
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
    mtrr_setup ();
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        get_mtrr_state (&smp_mtrr_state);
        break;
      case X86_VENDOR_CYRIX:
        cyrix_arr_init ();
        break;
      case X86_VENDOR_CENTAUR:
        centaur_mcr_init ();
        break;
    }
}   /*  End Function mtrr_init_boot_cpu  */

__initfunc(static void intel_mtrr_init_secondary_cpu (void))
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /*  Note that this is not ideal, since the cache is only flushed/disabled
        for this CPU while the MTRRs are changed, but changing this requires
        more invasive changes to the way the kernel boots  */
    set_mtrr_prepare (&ctxt);
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);
    /*  Use the atomic bitops to update the global mask  */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
        if (mask & 0x01) set_bit (count, &smp_changes_mask);
        mask >>= 1;
    }
}   /*  End Function intel_mtrr_init_secondary_cpu  */

__initfunc(void mtrr_init_secondary_cpu (void))
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        intel_mtrr_init_secondary_cpu ();
        break;
      case X86_VENDOR_CYRIX:
        /*  This is _completely theoretical_!
            I assume here that one day Cyrix will support Intel APIC.
            In reality on non-Intel CPUs we won't even get to this routine.
            Hopefully no one will plug two Cyrix processors in a dual P5 board.
            :-)  */
        cyrix_arr_init_secondary ();
        break;
      default:
        printk ("mtrr: SMP support incomplete for this vendor\n");
        break;
    }
}   /*  End Function mtrr_init_secondary_cpu  */
#endif  /*  __SMP__  */

__initfunc(int mtrr_init(void))
{
    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0;
#  ifdef __SMP__
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_INTEL:
        finalize_mtrr_state (&smp_mtrr_state);
        mtrr_state_warn (smp_changes_mask);
        break;
    }
#  else  /*  __SMP__  */
    mtrr_setup ();
    switch (boot_cpu_data.x86_vendor)
    {
      case X86_VENDOR_CYRIX:
        cyrix_arr_init ();
        break;
      case X86_VENDOR_CENTAUR:
        centaur_mcr_init ();
        break;
    }
#  endif  /*  !__SMP__  */

#  ifdef CONFIG_PROC_FS
    proc_register (&proc_root, &proc_root_mtrr);
#  endif

    init_table ();
    return 0;
}   /*  End Function mtrr_init  */