- Stephen Rothwell: APM updates
[davej-history.git] / arch / i386 / kernel / mtrr.c
blob60764e3cd717d42e1b7b0ac325cc94425f284c3a
1 /* Generic MTRR (Memory Type Range Register) driver.
3 Copyright (C) 1997-2000 Richard Gooch
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public
7 License as published by the Free Software Foundation; either
8 version 2 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public
16 License along with this library; if not, write to the Free
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
20 The postal address is:
21 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
23 Source: "Pentium Pro Family Developer's Manual, Volume 3:
24 Operating System Writer's Guide" (Intel document number 242692),
25 section 11.11.7
27 ChangeLog
29 Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
30 Initial register-setting code (from proform-1.0).
31 19971216 Richard Gooch <rgooch@atnf.csiro.au>
32 Original version for /proc/mtrr interface, SMP-safe.
33 v1.0
34 19971217 Richard Gooch <rgooch@atnf.csiro.au>
35 Bug fix for ioctls()'s.
36 Added sample code in Documentation/mtrr.txt
37 v1.1
38 19971218 Richard Gooch <rgooch@atnf.csiro.au>
39 Disallow overlapping regions.
40 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
41 Register-setting fixups.
42 v1.2
43 19971222 Richard Gooch <rgooch@atnf.csiro.au>
44 Fixups for kernel 2.1.75.
45 v1.3
46 19971229 David Wragg <dpw@doc.ic.ac.uk>
47 Register-setting fixups and conformity with Intel conventions.
48 19971229 Richard Gooch <rgooch@atnf.csiro.au>
49 Cosmetic changes and wrote this ChangeLog ;-)
50 19980106 Richard Gooch <rgooch@atnf.csiro.au>
51 Fixups for kernel 2.1.78.
52 v1.4
53 19980119 David Wragg <dpw@doc.ic.ac.uk>
54 Included passive-release enable code (elsewhere in PCI setup).
55 v1.5
56 19980131 Richard Gooch <rgooch@atnf.csiro.au>
57 Replaced global kernel lock with private spinlock.
58 v1.6
59 19980201 Richard Gooch <rgooch@atnf.csiro.au>
60 Added wait for other CPUs to complete changes.
61 v1.7
62 19980202 Richard Gooch <rgooch@atnf.csiro.au>
63 Bug fix in definition of <set_mtrr> for UP.
64 v1.8
65 19980319 Richard Gooch <rgooch@atnf.csiro.au>
66 Fixups for kernel 2.1.90.
67 19980323 Richard Gooch <rgooch@atnf.csiro.au>
68 Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
69 v1.9
70 19980325 Richard Gooch <rgooch@atnf.csiro.au>
71 Fixed test for overlapping regions: confused by adjacent regions
72 19980326 Richard Gooch <rgooch@atnf.csiro.au>
73 Added wbinvd in <set_mtrr_prepare>.
74 19980401 Richard Gooch <rgooch@atnf.csiro.au>
75 Bug fix for non-SMP compilation.
76 19980418 David Wragg <dpw@doc.ic.ac.uk>
77 Fixed-MTRR synchronisation for SMP and use atomic operations
78 instead of spinlocks.
79 19980418 Richard Gooch <rgooch@atnf.csiro.au>
80 Differentiate different MTRR register classes for BIOS fixup.
81 v1.10
82 19980419 David Wragg <dpw@doc.ic.ac.uk>
83 Bug fix in variable MTRR synchronisation.
84 v1.11
85 19980419 Richard Gooch <rgooch@atnf.csiro.au>
86 Fixups for kernel 2.1.97.
87 v1.12
88 19980421 Richard Gooch <rgooch@atnf.csiro.au>
89 Safer synchronisation across CPUs when changing MTRRs.
90 v1.13
91 19980423 Richard Gooch <rgooch@atnf.csiro.au>
92 Bugfix for SMP systems without MTRR support.
93 v1.14
94 19980427 Richard Gooch <rgooch@atnf.csiro.au>
95 Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
96 v1.15
97 19980427 Richard Gooch <rgooch@atnf.csiro.au>
98 Use atomic bitops for setting SMP change mask.
99 v1.16
100 19980428 Richard Gooch <rgooch@atnf.csiro.au>
101 Removed spurious diagnostic message.
102 v1.17
103 19980429 Richard Gooch <rgooch@atnf.csiro.au>
104 Moved register-setting macros into this file.
105 Moved setup code from init/main.c to i386-specific areas.
106 v1.18
107 19980502 Richard Gooch <rgooch@atnf.csiro.au>
108 Moved MTRR detection outside conditionals in <mtrr_init>.
109 v1.19
110 19980502 Richard Gooch <rgooch@atnf.csiro.au>
111 Documentation improvement: mention Pentium II and AGP.
112 v1.20
113 19980521 Richard Gooch <rgooch@atnf.csiro.au>
114 Only manipulate interrupt enable flag on local CPU.
115 Allow enclosed uncachable regions.
116 v1.21
117 19980611 Richard Gooch <rgooch@atnf.csiro.au>
118 Always define <main_lock>.
119 v1.22
120 19980901 Richard Gooch <rgooch@atnf.csiro.au>
121 Removed module support in order to tidy up code.
122 Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
123 Created addition queue for prior to SMP commence.
124 v1.23
125 19980902 Richard Gooch <rgooch@atnf.csiro.au>
126 Ported patch to kernel 2.1.120-pre3.
127 v1.24
128 19980910 Richard Gooch <rgooch@atnf.csiro.au>
129 Removed sanity checks and addition queue: Linus prefers an OOPS.
130 v1.25
131 19981001 Richard Gooch <rgooch@atnf.csiro.au>
132 Fixed harmless compiler warning in include/asm-i386/mtrr.h
133 Fixed version numbering and history for v1.23 -> v1.24.
134 v1.26
135 19990118 Richard Gooch <rgooch@atnf.csiro.au>
136 Added devfs support.
137 v1.27
138 19990123 Richard Gooch <rgooch@atnf.csiro.au>
139 Changed locking to spin with reschedule.
140 Made use of new <smp_call_function>.
141 v1.28
142 19990201 Zoltán Böszörményi <zboszor@mail.externet.hu>
143 Extended the driver to be able to use Cyrix style ARRs.
144 19990204 Richard Gooch <rgooch@atnf.csiro.au>
145 Restructured Cyrix support.
146 v1.29
147 19990204 Zoltán Böszörményi <zboszor@mail.externet.hu>
148 Refined ARR support: enable MAPEN in set_mtrr_prepare()
149 and disable MAPEN in set_mtrr_done().
150 19990205 Richard Gooch <rgooch@atnf.csiro.au>
151 Minor cleanups.
152 v1.30
153 19990208 Zoltán Böszörményi <zboszor@mail.externet.hu>
154 Protect plain 6x86s (and other processors without the
155 Page Global Enable feature) against accessing CR4 in
156 set_mtrr_prepare() and set_mtrr_done().
157 19990210 Richard Gooch <rgooch@atnf.csiro.au>
158 Turned <set_mtrr_up> and <get_mtrr> into function pointers.
159 v1.31
160 19990212 Zoltán Böszörményi <zboszor@mail.externet.hu>
161 Major rewrite of cyrix_arr_init(): do not touch ARRs,
162 leave them as the BIOS have set them up.
163 Enable usage of all 8 ARRs.
164 Avoid multiplications by 3 everywhere and other
165 code clean ups/speed ups.
166 19990213 Zoltán Böszörményi <zboszor@mail.externet.hu>
167 Set up other Cyrix processors identical to the boot cpu.
168 Since Cyrix don't support Intel APIC, this is l'art pour l'art.
169 Weigh ARRs by size:
170 If size <= 32M is given, set up ARR# we were given.
171 If size > 32M is given, set up ARR7 only if it is free,
172 fail otherwise.
173 19990214 Zoltán Böszörményi <zboszor@mail.externet.hu>
174 Also check for size >= 256K if we are to set up ARR7,
175 mtrr_add() returns the value it gets from set_mtrr()
176 19990218 Zoltán Böszörményi <zboszor@mail.externet.hu>
177 Remove Cyrix "coma bug" workaround from here.
178 Moved to linux/arch/i386/kernel/setup.c and
179 linux/include/asm-i386/bugs.h
180 19990228 Richard Gooch <rgooch@atnf.csiro.au>
181 Added MTRRIOC_KILL_ENTRY ioctl(2)
182 Trap for counter underflow in <mtrr_file_del>.
183 Trap for 4 MiB aligned regions for PPro, stepping <= 7.
184 19990301 Richard Gooch <rgooch@atnf.csiro.au>
185 Created <get_free_region> hook.
186 19990305 Richard Gooch <rgooch@atnf.csiro.au>
187 Temporarily disable AMD support now MTRR capability flag is set.
188 v1.32
189 19990308 Zoltán Böszörményi <zboszor@mail.externet.hu>
190 Adjust my changes (19990212-19990218) to Richard Gooch's
191 latest changes. (19990228-19990305)
192 v1.33
193 19990309 Richard Gooch <rgooch@atnf.csiro.au>
194 Fixed typo in <printk> message.
195 19990310 Richard Gooch <rgooch@atnf.csiro.au>
196 Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
197 v1.34
198 19990511 Bart Hartgers <bart@etpmod.phys.tue.nl>
199 Support Centaur C6 MCR's.
200 19990512 Richard Gooch <rgooch@atnf.csiro.au>
201 Minor cleanups.
202 v1.35
203 19990707 Zoltán Böszörményi <zboszor@mail.externet.hu>
204 Check whether ARR3 is protected in cyrix_get_free_region()
205 and mtrr_del(). The code won't attempt to delete or change it
206 from now on if the BIOS protected ARR3. It silently skips ARR3
207 in cyrix_get_free_region() or returns with an error code from
208 mtrr_del().
209 19990711 Zoltán Böszörményi <zboszor@mail.externet.hu>
210 Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
211 if ARR3 isn't protected. This is needed because if SMM is active
212 and ARR3 isn't protected then deleting and setting ARR3 again
213 may lock up the processor. With SMM entirely disabled, it does
214 not happen.
215 19990812 Zoltán Böszörményi <zboszor@mail.externet.hu>
216                Rearrange switch() statements so the driver accommodates
217 the fact that the AMD Athlon handles its MTRRs the same way
218 as Intel does.
219 19990814 Zoltán Böszörményi <zboszor@mail.externet.hu>
220 Double check for Intel in mtrr_add()'s big switch() because
221 that revision check is only valid for Intel CPUs.
222 19990819 Alan Cox <alan@redhat.com>
223 Tested Zoltan's changes on a pre production Athlon - 100%
224 success.
225 19991008 Manfred Spraul <manfreds@colorfullife.com>
226 replaced spin_lock_reschedule() with a normal semaphore.
227 v1.36
228 20000221 Richard Gooch <rgooch@atnf.csiro.au>
229 Compile fix if procfs and devfs not enabled.
230 Formatting changes.
231 v1.37
232 20001109 H. Peter Anvin <hpa@zytor.com>
233 Use the new centralized CPU feature detects.
235 #include <linux/types.h>
236 #include <linux/errno.h>
237 #include <linux/sched.h>
238 #include <linux/tty.h>
239 #include <linux/timer.h>
240 #include <linux/config.h>
241 #include <linux/kernel.h>
242 #include <linux/wait.h>
243 #include <linux/string.h>
244 #include <linux/malloc.h>
245 #include <linux/ioport.h>
246 #include <linux/delay.h>
247 #include <linux/fs.h>
248 #include <linux/ctype.h>
249 #include <linux/proc_fs.h>
250 #include <linux/devfs_fs_kernel.h>
251 #include <linux/mm.h>
252 #include <linux/module.h>
253 #define MTRR_NEED_STRINGS
254 #include <asm/mtrr.h>
255 #include <linux/init.h>
256 #include <linux/smp.h>
257 #include <linux/smp_lock.h>
259 #include <asm/uaccess.h>
260 #include <asm/io.h>
261 #include <asm/processor.h>
262 #include <asm/system.h>
263 #include <asm/pgtable.h>
264 #include <asm/segment.h>
265 #include <asm/bitops.h>
266 #include <asm/atomic.h>
267 #include <asm/msr.h>
269 #include <asm/hardirq.h>
270 #include <linux/irq.h>
272 #define MTRR_VERSION "1.37 (20001109)"
274 #define TRUE 1
275 #define FALSE 0
278 * The code assumes all processors support the same MTRR
279 * interface. This is generally a good assumption, but could
280 * potentially be a problem.
282 enum mtrr_if_type {
283 MTRR_IF_NONE, /* No MTRRs supported */
284 MTRR_IF_INTEL, /* Intel (P6) standard MTRRs */
285 MTRR_IF_AMD_K6, /* AMD pre-Athlon MTRRs */
286 MTRR_IF_CYRIX_ARR, /* Cyrix ARRs */
287 MTRR_IF_CENTAUR_MCR, /* Centaur MCRs */
288 } mtrr_if = MTRR_IF_NONE;
290 static __initdata char *mtrr_if_name[] = {
291 "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
294 #define MTRRcap_MSR 0x0fe
295 #define MTRRdefType_MSR 0x2ff
297 #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
298 #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
300 #define NUM_FIXED_RANGES 88
301 #define MTRRfix64K_00000_MSR 0x250
302 #define MTRRfix16K_80000_MSR 0x258
303 #define MTRRfix16K_A0000_MSR 0x259
304 #define MTRRfix4K_C0000_MSR 0x268
305 #define MTRRfix4K_C8000_MSR 0x269
306 #define MTRRfix4K_D0000_MSR 0x26a
307 #define MTRRfix4K_D8000_MSR 0x26b
308 #define MTRRfix4K_E0000_MSR 0x26c
309 #define MTRRfix4K_E8000_MSR 0x26d
310 #define MTRRfix4K_F0000_MSR 0x26e
311 #define MTRRfix4K_F8000_MSR 0x26f
313 #ifdef CONFIG_SMP
314 # define MTRR_CHANGE_MASK_FIXED 0x01
315 # define MTRR_CHANGE_MASK_VARIABLE 0x02
316 # define MTRR_CHANGE_MASK_DEFTYPE 0x04
317 #endif
319 /* In the Intel processor's MTRR interface, the MTRR type is always held in
320 an 8 bit field: */
321 typedef u8 mtrr_type;
323 #define LINE_SIZE 80
324 #define JIFFIE_TIMEOUT 100
326 #ifdef CONFIG_SMP
327 # define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
328 #else
329 # define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
330 TRUE)
331 #endif
333 #if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
334 # define USERSPACE_INTERFACE
335 #endif
337 #ifndef USERSPACE_INTERFACE
338 # define compute_ascii() while (0)
339 #endif
341 #ifdef USERSPACE_INTERFACE
342 static char *ascii_buffer;
343 static unsigned int ascii_buf_bytes;
344 #endif
345 static unsigned int *usage_table;
346 static DECLARE_MUTEX(main_lock);
348 /* Private functions */
349 #ifdef USERSPACE_INTERFACE
350 static void compute_ascii (void);
351 #endif
354 struct set_mtrr_context
356 unsigned long flags;
357 unsigned long deftype_lo;
358 unsigned long deftype_hi;
359 unsigned long cr4val;
360 unsigned long ccr3;
363 static int arr3_protected;
/* Put the processor into a state where MTRRs can be safely set: local
   interrupts off, caches disabled and flushed (wbinvd also flushes the
   TLBs), PGE cleared, and for Intel the MTRRs disabled / for Cyrix the
   MAPEN window opened.  State saved in *ctxt is undone by <set_mtrr_done>. */
static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    /* Disable interrupts locally */
    __save_flags (ctxt->flags); __cli ();

    /* AMD K6 and Centaur interfaces need no cache/MSR preparation */
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
	return;

    /* Save value of CR4 and clear Page Global Enable (bit 7) */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
	asm volatile ("movl %%cr4, %0\n\t"
		      "movl %0, %1\n\t"
		      "andb $0x7f, %b1\n\t"
		      "movl %1, %%cr4\n\t"
		      : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");

    /* Disable and flush caches. Note that wbinvd flushes the TLBs as
       a side-effect */
    asm volatile ("movl %%cr0, %0\n\t"
		  "orl $0x40000000, %0\n\t"
		  "wbinvd\n\t"
		  "movl %0, %%cr0\n\t"
		  "wbinvd\n\t"
		  : "=r" (tmp) : : "memory");

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Disable MTRRs, and set the default type to uncached */
	rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else were excluded at the top */
	tmp = getCx86 (CX86_CCR3);
	setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10);   /* set MAPEN bit */
	ctxt->ccr3 = tmp;   /* remember original CCR3 for set_mtrr_done */
    }
}   /* End Function set_mtrr_prepare */
/* Restore the processor after a set_mtrr_prepare: flush again, restore
   MTRRdefType (Intel) or CCR3 (Cyrix), re-enable caches, restore CR4 and
   finally the saved interrupt flag. */
static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
    unsigned long tmp;

    /* AMD K6 and Centaur did no preparation: just restore the IRQ flag */
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR ) {
	__restore_flags (ctxt->flags);
	return;
    }

    /* Flush caches and TLBs */
    asm volatile ("wbinvd" : : : "memory" );

    /* Restore MTRRdefType */
    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Intel (P6) standard MTRRs */
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86 (CX86_CCR3, ctxt->ccr3);   /* also closes the MAPEN window */
    }

    /* Enable caches */
    asm volatile ("movl %%cr0, %0\n\t"
		  "andl $0xbfffffff, %0\n\t"
		  "movl %0, %%cr0\n\t"
		  : "=r" (tmp) : : "memory");

    /* Restore value of CR4 */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
	asm volatile ("movl %0, %%cr4"
		      : : "r" (ctxt->cr4val) : "memory");

    /* Re-enable interrupts locally (if enabled previously) */
    __restore_flags (ctxt->flags);
}   /* End Function set_mtrr_done */
442 /* This function returns the number of variable MTRRs */
443 static unsigned int get_num_var_ranges (void)
445 unsigned long config, dummy;
447 switch ( mtrr_if )
449 case MTRR_IF_INTEL:
450 rdmsr (MTRRcap_MSR, config, dummy);
451 return (config & 0xff);
452 case MTRR_IF_AMD_K6:
453 return 2;
454 case MTRR_IF_CYRIX_ARR:
455 return 8;
456 case MTRR_IF_CENTAUR_MCR:
457 return 8;
458 default:
459 return 0;
461 } /* End Function get_num_var_ranges */
463 /* Returns non-zero if we have the write-combining memory type */
464 static int have_wrcomb (void)
466 unsigned long config, dummy;
468 switch ( mtrr_if )
470 case MTRR_IF_INTEL:
471 rdmsr (MTRRcap_MSR, config, dummy);
472 return (config & (1<<10));
473 return 1;
474 case MTRR_IF_AMD_K6:
475 case MTRR_IF_CENTAUR_MCR:
476 case MTRR_IF_CYRIX_ARR:
477 return 1;
478 default:
479 return 0;
481 } /* End Function have_wrcomb */
483 static u32 size_or_mask, size_and_mask;
485 static void intel_get_mtrr (unsigned int reg, unsigned long *base,
486 unsigned long *size, mtrr_type *type)
488 unsigned long mask_lo, mask_hi, base_lo, base_hi;
490 rdmsr (MTRRphysMask_MSR(reg), mask_lo, mask_hi);
491 if ( (mask_lo & 0x800) == 0 )
493 /* Invalid (i.e. free) range */
494 *base = 0;
495 *size = 0;
496 *type = 0;
497 return;
500 rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
502 /* Work out the shifted address mask. */
503 mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
504 | mask_lo >> PAGE_SHIFT;
506 /* This works correctly if size is a power of two, i.e. a
507 contiguous range. */
508 *size = -mask_lo;
509 *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
510 *type = base_lo & 0xff;
511 } /* End Function intel_get_mtrr */
/* Read back one Cyrix ARR (Address Range Register).
   <reg> The ARR index (0-7).
   <base>,<size> Returned region base and length, in pages.
   <type> Returned memory type decoded from the matching RCR.
   Opens the CCR3 MAPEN window with interrupts disabled, since the config
   registers are only visible while MAPEN is set. */
static void cyrix_get_arr (unsigned int reg, unsigned long *base,
			   unsigned long *size, mtrr_type *type)
{
    unsigned long flags;
    unsigned char arr, ccr3, rcr, shift;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* Save flags and disable interrupts */
    __save_flags (flags); __cli ();

    ccr3 = getCx86 (CX86_CCR3);
    setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
    /* The 24-bit base lives in three consecutive config registers;
       assemble it into bytes 3..1 of *base (little-endian x86 layout,
       so these are the most-significant bytes) */
    ((unsigned char *) base)[3] = getCx86 (arr);
    ((unsigned char *) base)[2] = getCx86 (arr+1);
    ((unsigned char *) base)[1] = getCx86 (arr+2);
    rcr = getCx86(CX86_RCR_BASE + reg);
    setCx86 (CX86_CCR3, ccr3); /* disable MAPEN */

    /* Enable interrupts if it was enabled previously */
    __restore_flags (flags);
    /* Low nibble of the third register byte encodes the block-size field */
    shift = ((unsigned char *) base)[1] & 0x0f;
    *base >>= PAGE_SHIFT;

    /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     * Note: shift==0xf means 4G, this is unsupported.
     */
    if (shift)
	*size = (reg < 7 ? 0x1UL : 0x40UL) << shift;
    else
	*size = 0;

    /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6,
       hence the two different RCR decode tables */
    if (reg < 7)
    {
	switch (rcr)
	{
	  case  1: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRBACK;     break;
	  case  9: *type = MTRR_TYPE_WRCOMB;     break;
	  case 24:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    } else
    {
	switch (rcr)
	{
	  case  0: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRCOMB;     break;
	  case  9: *type = MTRR_TYPE_WRBACK;     break;
	  case 25:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    }
}   /* End Function cyrix_get_arr */
569 static void amd_get_mtrr (unsigned int reg, unsigned long *base,
570 unsigned long *size, mtrr_type *type)
572 unsigned long low, high;
574 rdmsr (0xC0000085, low, high);
575 /* Upper dword is region 1, lower is region 0 */
576 if (reg == 1) low = high;
577 /* The base masks off on the right alignment */
578 *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
579 *type = 0;
580 if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
581 if (low & 2) *type = MTRR_TYPE_WRCOMB;
582 if ( !(low & 3) )
584 *size = 0;
585 return;
588 * This needs a little explaining. The size is stored as an
589 * inverted mask of bits of 128K granularity 15 bits long offset
590 * 2 bits
592 * So to get a size we do invert the mask and add 1 to the lowest
593 * mask bit (4 as its 2 bits in). This gives us a size we then shift
594 * to turn into 128K blocks
596 * eg 111 1111 1111 1100 is 512K
598 * invert 000 0000 0000 0011
599 * +1 000 0000 0000 0100
600 * *128K ...
602 low = (~low) & 0x1FFFC;
603 *size = (low + 4) << (15 - PAGE_SHIFT);
604 return;
605 } /* End Function amd_get_mtrr */
/* Shadow copies of the eight Centaur MCRs (high dword = base,
   low dword = mask + control bits) */
static struct
{
    unsigned long high;
    unsigned long low;
} centaur_mcr[8];

/* Read back one Centaur MCR from the shadow table -- the hardware
   registers are only ever written through centaur_set_mcr_up(), never
   re-read.  Base and size are in pages. */
static void centaur_get_mcr (unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
    *base = centaur_mcr[reg].high >> PAGE_SHIFT;
    *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
    *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */
}   /* End Function centaur_get_mcr */
621 static void (*get_mtrr) (unsigned int reg, unsigned long *base,
622 unsigned long *size, mtrr_type *type);
624 static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
625 unsigned long size, mtrr_type type, int do_safe)
626 /* [SUMMARY] Set variable MTRR register on the local CPU.
627 <reg> The register to set.
628 <base> The base address of the region.
629 <size> The size of the region. If this is 0 the region is disabled.
630 <type> The type of the region.
631 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
632 be done externally.
633 [RETURNS] Nothing.
636 struct set_mtrr_context ctxt;
638 if (do_safe) set_mtrr_prepare (&ctxt);
639 if (size == 0)
641 /* The invalid bit is kept in the mask, so we simply clear the
642 relevant mask register to disable a range. */
643 wrmsr (MTRRphysMask_MSR (reg), 0, 0);
645 else
647 wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type,
648 (base & size_and_mask) >> (32 - PAGE_SHIFT));
649 wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800,
650 (-size & size_and_mask) >> (32 - PAGE_SHIFT));
652 if (do_safe) set_mtrr_done (&ctxt);
653 } /* End Function intel_set_mtrr_up */
/* Program one Cyrix ARR on the local CPU.
   <reg> The ARR to set (0-7).
   <base> The base address of the region (in pages).
   <size> The size of the region (in pages). If this is 0 the region is
   disabled.
   <type> The type of the region.
   <do_safe> If TRUE, wrap the change in set_mtrr_prepare/set_mtrr_done
   (which also opens the MAPEN window needed for setCx86). */
static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
			      unsigned long size, mtrr_type type, int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned char arr, arr_type, arr_size;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
    if (reg >= 7)
	size >>= 6;

    size &= 0x7fff; /* make sure arr_size <= 14 */
    /* arr_size = position of the highest set bit + 1 (log2 of the size) */
    for(arr_size = 0; size; arr_size++, size >>= 1);

    /* The RCR type encodings differ between ARR0-ARR6 and ARR7 because
       bit 0 is Cache Disable on the former and Cache Enable on the latter */
    if (reg<7)
    {
	switch (type) {
	  case MTRR_TYPE_UNCACHABLE:	arr_type = 1;  break;
	  case MTRR_TYPE_WRCOMB:	arr_type = 9;  break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 24; break;
	  default:			arr_type = 8;  break;
	}
    }
    else
    {
	switch (type)
	{
	  case MTRR_TYPE_UNCACHABLE:	arr_type = 0;  break;
	  case MTRR_TYPE_WRCOMB:	arr_type = 8;  break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 25; break;
	  default:			arr_type = 9;  break;
	}
    }

    if (do_safe) set_mtrr_prepare (&ctxt);
    base <<= PAGE_SHIFT;
    /* Write the 24-bit base (bytes 3..1, little-endian x86 layout) into
       three consecutive config registers; the size field shares the
       third register's low nibble */
    setCx86(arr,   ((unsigned char *) &base)[3]);
    setCx86(arr+1, ((unsigned char *) &base)[2]);
    setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size);
    setCx86(CX86_RCR_BASE + reg, arr_type);
    if (do_safe) set_mtrr_done (&ctxt);
}   /* End Function cyrix_set_arr_up */
699 static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
700 unsigned long size, mtrr_type type, int do_safe)
701 /* [SUMMARY] Set variable MTRR register on the local CPU.
702 <reg> The register to set.
703 <base> The base address of the region.
704 <size> The size of the region. If this is 0 the region is disabled.
705 <type> The type of the region.
706 <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
707 be done externally.
708 [RETURNS] Nothing.
711 u32 regs[2];
712 struct set_mtrr_context ctxt;
714 if (do_safe) set_mtrr_prepare (&ctxt);
716 * Low is MTRR0 , High MTRR 1
718 rdmsr (0xC0000085, regs[0], regs[1]);
720 * Blank to disable
722 if (size == 0)
723 regs[reg] = 0;
724 else
725 /* Set the register to the base, the type (off by one) and an
726 inverted bitmask of the size The size is the only odd
727 bit. We are fed say 512K We invert this and we get 111 1111
728 1111 1011 but if you subtract one and invert you get the
729 desired 111 1111 1111 1100 mask
731 But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
732 regs[reg] = (-size>>(15-PAGE_SHIFT) & 0x0001FFFC)
733 | (base<<PAGE_SHIFT) | (type+1);
736 * The writeback rule is quite specific. See the manual. Its
737 * disable local interrupts, write back the cache, set the mtrr
739 __asm__ __volatile__ ("wbinvd" : : : "memory");
740 wrmsr (0xC0000085, regs[0], regs[1]);
741 if (do_safe) set_mtrr_done (&ctxt);
742 } /* End Function amd_set_mtrr_up */
745 static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
746 unsigned long size, mtrr_type type,
747 int do_safe)
749 struct set_mtrr_context ctxt;
750 unsigned long low, high;
752 if (do_safe) set_mtrr_prepare( &ctxt );
753 if (size == 0)
755 /* Disable */
756 high = low = 0;
758 else
760 high = base << PAGE_SHIFT;
761 low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */
763 centaur_mcr[reg].high = high;
764 centaur_mcr[reg].low = low;
765 wrmsr (0x110 + reg, low, high);
766 if (do_safe) set_mtrr_done( &ctxt );
767 } /* End Function centaur_set_mtrr_up */
769 static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
770 unsigned long size, mtrr_type type,
771 int do_safe);
773 #ifdef CONFIG_SMP
/* Raw MSR pair describing one variable MTRR, used to compare and
   propagate settings across CPUs at boot */
struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};

/* Get the MSR pair relating to a var range */
static void __init get_mtrr_var_range (unsigned int index,
				       struct mtrr_var_range *vr)
{
    rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
    rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
}   /* End Function get_mtrr_var_range */
793 /* Set the MSR pair relating to a var range. Returns TRUE if
794 changes are made */
795 static int __init set_mtrr_var_range_testing (unsigned int index,
796 struct mtrr_var_range *vr)
798 unsigned int lo, hi;
799 int changed = FALSE;
801 rdmsr(MTRRphysBase_MSR(index), lo, hi);
802 if ( (vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
803 || (vr->base_hi & 0xfUL) != (hi & 0xfUL) )
805 wrmsr (MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
806 changed = TRUE;
809 rdmsr (MTRRphysMask_MSR(index), lo, hi);
811 if ( (vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
812 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL) )
814 wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
815 changed = TRUE;
817 return changed;
818 } /* End Function set_mtrr_var_range_testing */
820 static void __init get_fixed_ranges(mtrr_type *frs)
822 unsigned long *p = (unsigned long *)frs;
823 int i;
825 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
827 for (i = 0; i < 2; i++)
828 rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
829 for (i = 0; i < 8; i++)
830 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
831 } /* End Function get_fixed_ranges */
833 static int __init set_fixed_ranges_testing(mtrr_type *frs)
835 unsigned long *p = (unsigned long *)frs;
836 int changed = FALSE;
837 int i;
838 unsigned long lo, hi;
840 rdmsr(MTRRfix64K_00000_MSR, lo, hi);
841 if (p[0] != lo || p[1] != hi)
843 wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
844 changed = TRUE;
847 for (i = 0; i < 2; i++)
849 rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
850 if (p[2 + i*2] != lo || p[3 + i*2] != hi)
852 wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
853 changed = TRUE;
857 for (i = 0; i < 8; i++)
859 rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
860 if (p[6 + i*2] != lo || p[7 + i*2] != hi)
862 wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
863 changed = TRUE;
866 return changed;
867 } /* End Function set_fixed_ranges_testing */
/* Complete MTRR state of one CPU, captured by <get_mtrr_state> and
   replayed on other CPUs by <set_mtrr_state> */
struct mtrr_state
{
    unsigned int num_var_ranges;	/* entries in var_ranges[] */
    struct mtrr_var_range *var_ranges;	/* kmalloc'd; freed by finalize_mtrr_state */
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;		/* MTRRdefType bits 10-11 */
    mtrr_type def_type;			/* MTRRdefType low byte */
};
/* Grab all of the MTRR state for this CPU into *state */
static void __init get_mtrr_state (struct mtrr_state *state)
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges ();
    vrs = state->var_ranges
	  = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL)
	/* Allocation failure: carry on with no variable ranges recorded */
	nvrs = state->num_var_ranges = 0;

    for (i = 0; i < nvrs; i++)
	get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    /* Default type (low byte) and enable bits (10-11) of MTRRdefType */
    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /* End Function get_mtrr_state */
902 /* Free resources associated with a struct mtrr_state */
903 static void __init finalize_mtrr_state(struct mtrr_state *state)
905 if (state->var_ranges) kfree (state->var_ranges);
906 } /* End Function finalize_mtrr_state */
static unsigned long __init set_mtrr_state (struct mtrr_state *state,
					    struct set_mtrr_context *ctxt)
/* [SUMMARY] Set the MTRR state for this CPU.
   <state> The MTRR state information to read.
   <ctxt> Some relevant CPU context.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indication what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
	if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
	    change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing (state->fixed_ranges) )
	change_mask |= MTRR_CHANGE_MASK_FIXED;
    /* Set_mtrr_restore restores the old value of MTRRdefType,
       so to set it we fiddle with the saved value */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
	 || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled)
    {
	/* NOTE(review): this ORs the desired bits in without first clearing
	   the old def-type/enable fields, so bits already set in the saved
	   value can never be cleared -- confirm this is intentional */
	ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
	change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /* End Function set_mtrr_state */
/* Synchronisation state for set_mtrr_smp(): <undone_count> counts the CPUs
   that have not yet finished the current phase, and the two flags act as
   spin barriers released by the master CPU */
static atomic_t undone_count;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;

/* Arguments handed to ipi_handler() on the other CPUs */
struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};
static void ipi_handler (void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
   Mirrors the master's sequence in set_mtrr_smp(): disable cache, wait,
   write the MTRR, wait, re-enable cache.  The two spin barriers and
   <undone_count> keep every CPU's cache disabled while any MTRR changes.
   [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;

    set_mtrr_prepare (&ctxt);
    /* Notify master that I've flushed and disabled my cache */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) barrier ();
    /* The master has cleared me to execute */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
		    data->smp_type, FALSE);
    /* Notify master CPU that I've executed the function */
    atomic_dec (&undone_count);
    /* Wait for master to clear me to enable cache and return */
    while (wait_barrier_cache_enable) barrier ();
    set_mtrr_done (&ctxt);
}   /* End Function ipi_handler */
/* Set one MTRR on every CPU in lock-step.  The master drives the other
   CPUs (running ipi_handler) through two barrier phases so that all caches
   stay disabled while any CPU is rewriting its MTRR. */
static void set_mtrr_smp (unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    /* Raise both barriers before any other CPU can start */
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /* Start the ball rolling on other CPUs */
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
	panic ("mtrr: timed out waiting for other CPUs\n");
    /* Flush and disable the local CPU's cache */
    set_mtrr_prepare (&ctxt);
    /* Wait for all other CPUs to flush and disable their caches */
    while (atomic_read (&undone_count) > 0) barrier ();
    /* Set up for completion wait and then release other CPUs to change MTRRs*/
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /* Now wait for other CPUs to complete the function */
    while (atomic_read (&undone_count) > 0) barrier ();
    /* Now all CPUs should have finished the function. Release the barrier to
       allow them to re-enable their caches and return from their interrupt,
       then enable the local cache and return */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /* End Function set_mtrr_smp */
1008 /* Some BIOS's are fucked and don't set all MTRRs the same! */
1009 static void __init mtrr_state_warn(unsigned long mask)
1011 if (!mask) return;
1012 if (mask & MTRR_CHANGE_MASK_FIXED)
1013 printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
1014 if (mask & MTRR_CHANGE_MASK_VARIABLE)
1015 printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
1016 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
1017 printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
1018 printk ("mtrr: probably your BIOS does not setup all CPUs\n");
1019 } /* End Function mtrr_state_warn */
1021 #endif /* CONFIG_SMP */
1023 static char *attrib_to_str (int x)
1025 return (x <= 6) ? mtrr_strings[x] : "?";
1026 } /* End Function attrib_to_str */
/* Allocate and initialise the per-register usage counters (all regions
   the BIOS left behind start with a count of 1) and, when the userspace
   interface is compiled in, the ASCII report buffer (one LINE_SIZE slot
   per variable range). */
static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
	 == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef USERSPACE_INTERFACE
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
} /* End Function init_table */
1051 static int generic_get_free_region (unsigned long base, unsigned long size)
1052 /* [SUMMARY] Get a free MTRR.
1053 <base> The starting (base) address of the region.
1054 <size> The size (in bytes) of the region.
1055 [RETURNS] The index of the region on success, else -1 on error.
1058 int i, max;
1059 mtrr_type ltype;
1060 unsigned long lbase, lsize;
1062 max = get_num_var_ranges ();
1063 for (i = 0; i < max; ++i)
1065 (*get_mtrr) (i, &lbase, &lsize, &ltype);
1066 if (lsize == 0) return i;
1068 return -ENOSPC;
1069 } /* End Function generic_get_free_region */
static int cyrix_get_free_region (unsigned long base, unsigned long size)
/* [SUMMARY] Get a free ARR.
   <base> The starting (base) address of the region.
   <size> The size (in pages) of the region.
   [RETURNS] The index of a free ARR on success, else -ENOSPC.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /* If we are to set up a region >32M then look at ARR7 immediately
       (0x2000 pages of 4 kB == 32 MiB; ARR7 is the only large-block ARR) */
    if (size > 0x2000)
    {
	cyrix_get_arr (7, &lbase, &lsize, &ltype);
	if (lsize == 0) return 7;
	/* Else try ARR0-ARR6 first */
    }
    else
    {
	for (i = 0; i < 7; i++)
	{
	    cyrix_get_arr (i, &lbase, &lsize, &ltype);
	    /* ARR3 may be reserved for SMM (see cyrix_arr_init) */
	    if ((i == 3) && arr3_protected) continue;
	    if (lsize == 0) return i;
	}
	/* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K
	   (0x40 pages of 4 kB == 256 KiB) */
	cyrix_get_arr (i, &lbase, &lsize, &ltype);
	if ((lsize == 0) && (size >= 0x40)) return i;
    }
    return -ENOSPC;
} /* End Function cyrix_get_free_region */
/* Vector to the CPU-specific "find a free register" routine; defaults to
   the generic scan and is switched to cyrix_get_free_region by
   mtrr_setup() when Cyrix ARRs are in use. */
static int (*get_free_region) (unsigned long base,
			       unsigned long size) = generic_get_free_region;
1108 * mtrr_add_page - Add a memory type region
1109 * @base: Physical base address of region in pages (4 KB)
1110 * @size: Physical size of region in pages (4 KB)
1111 * @type: Type of MTRR desired
1112 * @increment: If this is true do usage counting on the region
1114 * Memory type region registers control the caching on newer Intel and
1115 * non Intel processors. This function allows drivers to request an
1116 * MTRR is added. The details and hardware specifics of each processor's
1117 * implementation are hidden from the caller, but nevertheless the
1118 * caller should expect to need to provide a power of two size on an
1119 * equivalent power of two boundary.
1121 * If the region cannot be added either because all regions are in use
1122 * or the CPU cannot support it a negative value is returned. On success
1123 * the register number for this entry is returned, but should be treated
1124 * as a cookie only.
1126 * On a multiprocessor machine the changes are made to all processors.
1127 * This is required on x86 by the Intel processors.
1129 * The available types are
 * %MTRR_TYPE_UNCACHABLE - No caching
1133 * %MTRR_TYPE_WRITEBACK - Write data back in bursts whenever
1135 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
1137 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
1139 * BUGS: Needs a quiet flag for the cases where drivers do not mind
1140 * failures and do not wish system log messages to be sent.
int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, char increment)
/* [SUMMARY] Add an MTRR entry.
   <base> The starting (base, in pages) address of the region.
   <size> The size of the region. (in pages)
   <type> The type of the new region.
   <increment> If true and the region already exists, the usage count will be
   incremented.
   [RETURNS] The MTRR register on success, else a negative number indicating
   the error code.
   [NOTE] This routine uses a spinlock.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    /* First: per-interface validation of base/size/type */
    switch ( mtrr_if )
    {
    case MTRR_IF_NONE:
	return -ENXIO;		/* No MTRRs whatsoever */

    case MTRR_IF_AMD_K6:
	/* Apply the K6 block alignment and size rules
	   In order
	   o Uncached or gathering only
	   o 128K or bigger block
	   o Power of 2 block
	   o base suitably aligned to the power
	*/
	if ( type > MTRR_TYPE_WRCOMB || size < (1 << (17-PAGE_SHIFT)) ||
	     (size & ~(size-1))-size || ( base & (size-1) ) )
	    return -EINVAL;
	break;

    case MTRR_IF_INTEL:
	/* For Intel PPro stepping <= 7, must be 4 MiB aligned */
	if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	     boot_cpu_data.x86 == 6 &&
	     boot_cpu_data.x86_model == 1 &&
	     boot_cpu_data.x86_mask <= 7 )
	{
	    if ( base & ((1 << (22-PAGE_SHIFT))-1) )
	    {
		printk (KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
		return -EINVAL;
	    }
	}
	/* Fall through to the shared alignment checks below */

    case MTRR_IF_CYRIX_ARR:
    case MTRR_IF_CENTAUR_MCR:
	if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
	{
	    if (type != MTRR_TYPE_WRCOMB)
	    {
		printk (KERN_WARNING "mtrr: only write-combining is supported\n");
		return -EINVAL;
	    }
	}
	else if (base + size < 0x100)
	{
	    printk (KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		    base, size);
	    return -EINVAL;
	}
	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1);
	if (lbase != last)
	{
	    printk (KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		    base, size);
	    return -EINVAL;
	}
	break;

    default:
	return -EINVAL;
    }

    if (type >= MTRR_NUM_TYPES)
    {
	printk ("mtrr: type: %u illegal\n", type);
	return -EINVAL;
    }

    /* If the type is WC, check that this processor supports it */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
	printk (KERN_WARNING "mtrr: your processor doesn't support write-combining\n");
	return -ENOSYS;
    }

    if ( base & size_or_mask || size & size_or_mask )
    {
	printk ("mtrr: base or size exceeds the MTRR width\n");
	return -EINVAL;
    }

    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /* Search for existing MTRR */
    down(&main_lock);
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (base >= lbase + lsize) continue;
	if ( (base < lbase) && (base + size <= lbase) ) continue;
	/* At this point we know there is some kind of overlap/enclosure */
	if ( (base < lbase) || (base + size > lbase + lsize) )
	{
	    /* Partial overlap is never allowed */
	    up(&main_lock);
	    printk (KERN_WARNING "mtrr: 0x%lx000,0x%lx000 overlaps existing"
		    " 0x%lx000,0x%lx000\n",
		    base, size, lbase, lsize);
	    return -EINVAL;
	}
	/* New region is enclosed by an existing region */
	if (ltype != type)
	{
	    /* An uncachable request may keep looking for another match */
	    if (type == MTRR_TYPE_UNCACHABLE) continue;
	    up(&main_lock);
	    printk ( "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
		     base, size, attrib_to_str (ltype), attrib_to_str (type) );
	    return -EINVAL;
	}
	/* Region already set up: just bump the reference count */
	if (increment) ++usage_table[i];
	compute_ascii ();
	up(&main_lock);
	return i;
    }
    /* Search for an empty MTRR */
    i = (*get_free_region) (base, size);
    if (i < 0)
    {
	up(&main_lock);
	printk ("mtrr: no more MTRRs available\n");
	return i;
    }
    set_mtrr (i, base, size, type);
    usage_table[i] = 1;
    compute_ascii ();
    up(&main_lock);
    return i;
} /* End Function mtrr_add_page */
1292 * mtrr_add - Add a memory type region
1293 * @base: Physical base address of region
1294 * @size: Physical size of region
1295 * @type: Type of MTRR desired
1296 * @increment: If this is true do usage counting on the region
1298 * Memory type region registers control the caching on newer Intel and
1299 * non Intel processors. This function allows drivers to request an
1300 * MTRR is added. The details and hardware specifics of each processor's
1301 * implementation are hidden from the caller, but nevertheless the
1302 * caller should expect to need to provide a power of two size on an
1303 * equivalent power of two boundary.
1305 * If the region cannot be added either because all regions are in use
1306 * or the CPU cannot support it a negative value is returned. On success
1307 * the register number for this entry is returned, but should be treated
1308 * as a cookie only.
1310 * On a multiprocessor machine the changes are made to all processors.
1311 * This is required on x86 by the Intel processors.
1313 * The available types are
 * %MTRR_TYPE_UNCACHABLE - No caching
1317 * %MTRR_TYPE_WRITEBACK - Write data back in bursts whenever
1319 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
1321 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
1323 * BUGS: Needs a quiet flag for the cases where drivers do not mind
1324 * failures and do not wish system log messages to be sent.
1327 int mtrr_add(unsigned long base, unsigned long size, unsigned int type, char increment)
1329 /* [SUMMARY] Add an MTRR entry.
1330 <base> The starting (base) address of the region.
1331 <size> The size (in bytes) of the region.
1332 <type> The type of the new region.
1333 <increment> If true and the region already exists, the usage count will be
1334 incremented.
1335 [RETURNS] The MTRR register on success, else a negative number indicating
1336 the error code.
1339 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1341 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1342 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1343 return -EINVAL;
1345 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment);
1346 } /* End Function mtrr_add */
1349 * mtrr_del_page - delete a memory type region
1350 * @reg: Register returned by mtrr_add
1351 * @base: Physical base address
1352 * @size: Size of region
1354 * If register is supplied then base and size are ignored. This is
1355 * how drivers should call it.
1357 * Releases an MTRR region. If the usage count drops to zero the
1358 * register is freed and the region returns to default state.
1359 * On success the register is returned, on failure a negative error
1360 * code.
int mtrr_del_page (int reg, unsigned long base, unsigned long size)
/* [SUMMARY] Delete MTRR/decrement usage count.
   <reg> The register. If this is less than 0 then <<base>> and <<size>> must
   be supplied.
   <base> The base address of the region (in pages). Ignored if <<reg>> >= 0.
   <size> The size of the region (in pages). Ignored if <<reg>> is >= 0.
   [RETURNS] The register on success, else a negative number indicating
   the error code.
   [NOTE] This routine uses a spinlock.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ( mtrr_if == MTRR_IF_NONE ) return -ENXIO;

    max = get_num_var_ranges ();
    down (&main_lock);
    if (reg < 0)
    {
	/* Search for existing MTRR matching base/size exactly */
	for (i = 0; i < max; ++i)
	{
	    (*get_mtrr) (i, &lbase, &lsize, &ltype);
	    if (lbase == base && lsize == size)
	    {
		reg = i;
		break;
	    }
	}
	if (reg < 0)
	{
	    up(&main_lock);
	    printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base, size);
	    return -EINVAL;
	}
    }
    if (reg >= max)
    {
	up (&main_lock);
	printk ("mtrr: register: %d too big\n", reg);
	return -EINVAL;
    }
    if ( mtrr_if == MTRR_IF_CYRIX_ARR )
    {
	/* ARR3 may be reserved for SMM use (see cyrix_arr_init) */
	if ( (reg == 3) && arr3_protected )
	{
	    up (&main_lock);
	    printk ("mtrr: ARR3 cannot be changed\n");
	    return -EINVAL;
	}
    }
    (*get_mtrr) (reg, &lbase, &lsize, &ltype);
    if (lsize < 1)
    {
	up (&main_lock);
	printk ("mtrr: MTRR %d not used\n", reg);
	return -EINVAL;
    }
    if (usage_table[reg] < 1)
    {
	up (&main_lock);
	printk ("mtrr: reg: %d has count=0\n", reg);
	return -EINVAL;
    }
    /* Only clear the hardware register once the last user is gone */
    if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
    compute_ascii ();
    up (&main_lock);
    return reg;
} /* End Function mtrr_del_page */
1436 * mtrr_del - delete a memory type region
1437 * @reg: Register returned by mtrr_add
1438 * @base: Physical base address
1439 * @size: Size of region
1441 * If register is supplied then base and size are ignored. This is
1442 * how drivers should call it.
1444 * Releases an MTRR region. If the usage count drops to zero the
1445 * register is freed and the region returns to default state.
1446 * On success the register is returned, on failure a negative error
1447 * code.
1450 int mtrr_del (int reg, unsigned long base, unsigned long size)
1451 /* [SUMMARY] Delete MTRR/decrement usage count.
1452 <reg> The register. If this is less than 0 then <<base>> and <<size>> must
1453 be supplied.
1454 <base> The base address of the region. This is ignored if <<reg>> is >= 0.
1455 <size> The size of the region. This is ignored if <<reg>> is >= 0.
1456 [RETURNS] The register on success, else a negative number indicating
1457 the error code.
1460 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1462 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1463 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1464 return -EINVAL;
1466 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
1469 #ifdef USERSPACE_INTERFACE
/* Add a region on behalf of a /proc or /dev user, tracking the reference
   in a per-open-file counter array (allocated lazily on first use and
   stored in file->private_data; freed by mtrr_close()).  <page> selects
   page units; otherwise byte units, which must be page aligned. */
static int mtrr_file_add (unsigned long base, unsigned long size,
			  unsigned int type, char increment, struct file *file, int page)
{
    int reg, max;
    unsigned int *fcount = file->private_data;

    max = get_num_var_ranges ();
    if (fcount == NULL)
    {
	/* First region added through this file handle: allocate counters */
	if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
	{
	    printk ("mtrr: could not allocate\n");
	    return -ENOMEM;
	}
	memset (fcount, 0, max * sizeof *fcount);
	file->private_data = fcount;
    }
    if (!page) {
	if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
	    return -EINVAL;
	}
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
    }
    reg = mtrr_add_page (base, size, type, 1);
    if (reg >= 0) ++fcount[reg];
    return reg;
} /* End Function mtrr_file_add */
1503 static int mtrr_file_del (unsigned long base, unsigned long size,
1504 struct file *file, int page)
1506 int reg;
1507 unsigned int *fcount = file->private_data;
1509 if (!page) {
1510 if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
1512 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1513 printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1514 return -EINVAL;
1516 base >>= PAGE_SHIFT;
1517 size >>= PAGE_SHIFT;
1519 reg = mtrr_del_page (-1, base, size);
1520 if (reg < 0) return reg;
1521 if (fcount == NULL) return reg;
1522 if (fcount[reg] < 1) return -EINVAL;
1523 --fcount[reg];
1524 return reg;
1525 } /* End Function mtrr_file_del */
1527 static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
1528 loff_t *ppos)
1530 if (*ppos >= ascii_buf_bytes) return 0;
1531 if (*ppos + len > ascii_buf_bytes) len = ascii_buf_bytes - *ppos;
1532 if ( copy_to_user (buf, ascii_buffer + *ppos, len) ) return -EFAULT;
1533 *ppos += len;
1534 return len;
1535 } /* End Function mtrr_read */
1537 static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
1538 loff_t *ppos)
1539 /* Format of control line:
1540 "base=%Lx size=%Lx type=%s" OR:
1541 "disable=%d"
1544 int i, err;
1545 unsigned long reg;
1546 unsigned long long base, size;
1547 char *ptr;
1548 char line[LINE_SIZE];
1550 if ( !suser () ) return -EPERM;
1551 /* Can't seek (pwrite) on this device */
1552 if (ppos != &file->f_pos) return -ESPIPE;
1553 memset (line, 0, LINE_SIZE);
1554 if (len > LINE_SIZE) len = LINE_SIZE;
1555 if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
1556 ptr = line + strlen (line) - 1;
1557 if (*ptr == '\n') *ptr = '\0';
1558 if ( !strncmp (line, "disable=", 8) )
1560 reg = simple_strtoul (line + 8, &ptr, 0);
1561 err = mtrr_del_page (reg, 0, 0);
1562 if (err < 0) return err;
1563 return len;
1565 if ( strncmp (line, "base=", 5) )
1567 printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
1568 return -EINVAL;
1570 base = simple_strtoull (line + 5, &ptr, 0);
1571 for (; isspace (*ptr); ++ptr);
1572 if ( strncmp (ptr, "size=", 5) )
1574 printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
1575 return -EINVAL;
1577 size = simple_strtoull (ptr + 5, &ptr, 0);
1578 if ( (base & 0xfff) || (size & 0xfff) )
1580 printk ("mtrr: size and base must be multiples of 4 kiB\n");
1581 printk ("mtrr: size: 0x%Lx base: 0x%Lx\n", size, base);
1582 return -EINVAL;
1584 for (; isspace (*ptr); ++ptr);
1585 if ( strncmp (ptr, "type=", 5) )
1587 printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
1588 return -EINVAL;
1590 ptr += 5;
1591 for (; isspace (*ptr); ++ptr);
1592 for (i = 0; i < MTRR_NUM_TYPES; ++i)
1594 if ( strcmp (ptr, mtrr_strings[i]) ) continue;
1595 base >>= PAGE_SHIFT;
1596 size >>= PAGE_SHIFT;
1597 err = mtrr_add_page ((unsigned long)base, (unsigned long)size, i, 1);
1598 if (err < 0) return err;
1599 return len;
1601 printk ("mtrr: illegal type: \"%s\"\n", ptr);
1602 return -EINVAL;
1603 } /* End Function mtrr_write */
/* Binary userspace interface: every *_ENTRY command copies an mtrr_sentry
   (byte units) or mtrr_gentry in/out of user space; *_PAGE_ENTRY variants
   use page units.  ADD/DEL track per-file references, SET/KILL do not. */
static int mtrr_ioctl (struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
    int err;
    mtrr_type type;
    struct mtrr_sentry sentry;
    struct mtrr_gentry gentry;

    switch (cmd)
    {
      default:
	return -ENOIOCTLCMD;
      case MTRRIOC_ADD_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);

	/* Hide entries that go above 4GB: the byte-unit gentry fields
	   cannot represent them (0x100000 pages == 4 GiB) */
	if (gentry.base + gentry.size > 0x100000 || gentry.size == 0x100000)
	    gentry.base = gentry.size = gentry.type = 0;
	else {
	    /* Convert page units back to bytes for the caller */
	    gentry.base <<= PAGE_SHIFT;
	    gentry.size <<= PAGE_SHIFT;
	    gentry.type = type;
	}

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
      case MTRRIOC_ADD_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add_page (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del_page (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_PAGE_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
	gentry.type = type;

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
    }
    return 0;
} /* End Function mtrr_ioctl */
1705 static int mtrr_close (struct inode *ino, struct file *file)
1707 int i, max;
1708 unsigned int *fcount = file->private_data;
1710 if (fcount == NULL) return 0;
1711 lock_kernel();
1712 max = get_num_var_ranges ();
1713 for (i = 0; i < max; ++i)
1715 while (fcount[i] > 0)
1717 if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
1718 --fcount[i];
1721 unlock_kernel();
1722 kfree (fcount);
1723 file->private_data = NULL;
1724 return 0;
1725 } /* End Function mtrr_close */
/* File operations shared by /proc/mtrr and the devfs node
   (GNU-style designated initialisers; unset hooks default to NULL) */
static struct file_operations mtrr_fops =
{
    owner:	THIS_MODULE,
    read:	mtrr_read,	/* dump the ASCII region table */
    write:	mtrr_write,	/* parse "base=... size=... type=..." */
    ioctl:	mtrr_ioctl,	/* binary interface */
    release:	mtrr_close,	/* drop per-file region references */
};
#  ifdef CONFIG_PROC_FS
/* /proc/mtrr entry; its size field is kept in sync with the ASCII table */
static struct proc_dir_entry *proc_root_mtrr;
#  endif  /*  CONFIG_PROC_FS  */

/* Handle of the devfs node (cpu/mtrr) */
static devfs_handle_t devfs_handle;
/* Rebuild the human-readable region table in ascii_buffer (one LINE_SIZE
   slot per register, allocated by init_table) and propagate the new
   length to the devfs and procfs entries.  Also zeroes the usage count
   of any register found empty.  Callers hold main_lock. */
static void compute_ascii (void)
{
    char factor;	/* 'K' or 'M' size suffix */
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
	(*get_mtrr) (i, &base, &size, &type);
	if (size == 0) usage_table[i] = 0;
	else
	{
	    if (size < (0x100000 >> PAGE_SHIFT))
	    {
		/* less than 1MB: report in KiB */
		factor = 'K';
		size <<= PAGE_SHIFT - 10;
	    }
	    else
	    {
		/* report in MiB */
		factor = 'M';
		size >>= 20 - PAGE_SHIFT;
	    }
	    sprintf
		(ascii_buffer + ascii_buf_bytes,
		 "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
		 i, base, base >> (20 - PAGE_SHIFT), size, factor,
		 attrib_to_str (type), usage_table[i]);
	    ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
	}
    }
    devfs_set_file_size (devfs_handle, ascii_buf_bytes);
#  ifdef CONFIG_PROC_FS
    proc_root_mtrr->size = ascii_buf_bytes;
#  endif  /*  CONFIG_PROC_FS  */
} /* End Function compute_ascii */
1784 #endif /* USERSPACE_INTERFACE */
1786 EXPORT_SYMBOL(mtrr_add);
1787 EXPORT_SYMBOL(mtrr_del);
1789 #ifdef CONFIG_SMP
/* Snapshot of one Cyrix ARR (Address Region Register), captured on the
   boot CPU by cyrix_arr_init() and replayed on secondary CPUs by
   cyrix_arr_init_secondary(). */
typedef struct
{
    unsigned long base;		/* region base */
    unsigned long size;		/* region size */
    mtrr_type type;		/* region memory type */
} arr_state_t;

arr_state_t arr_state[8] __initdata =
{
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
};

/* Snapshot of the Cyrix CCR0-CCR6 configuration registers (same
   capture/replay scheme as arr_state above) */
unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
/* Copy the boot CPU's saved CCR and ARR state (ccr_state[], arr_state[])
   onto a secondary Cyrix CPU so all CPUs agree. */
static void __init cyrix_arr_init_secondary(void)
{
    struct set_mtrr_context ctxt;
    int i;

    set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */

    /* the CCRs are not contiguous */
    for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
    /* NOTE(review): for i >= 4 this targets CX86_CCR4 + i, i.e. an offset
       of 4..6 past CCR4; CX86_CCR4 + (i - 4) looks like what was meant.
       Verify against the CX86_CCR* register definitions before changing. */
    for(   ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]);
    for(i=0; i<8; i++)
      cyrix_set_arr_up(i,
        arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
} /* End Function cyrix_arr_init_secondary */
1823 #endif
1826 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
1827 * with the SMM (System Management Mode) mode. So we need the following:
1828 * Check whether SMI_LOCK (CCR3 bit 0) is set
1829 * if it is set, write a warning message: ARR3 cannot be changed!
1830 * (it cannot be changed until the next processor reset)
1831 * if it is reset, then we can change it, set all the needed bits:
1832 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
1833 * - disable access to SMM memory (CCR1 bit 2 reset)
1834 * - disable SMM mode (CCR1 bit 1 reset)
1835 * - disable write protection of ARR3 (CCR6 bit 1 reset)
1836 * - (maybe) disable ARR3
1837 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
static void __init cyrix_arr_init(void)
/* Boot-CPU Cyrix setup: if SMI_LOCK (CCR3 bit 0) is set, ARR3 belongs to
   SMM and is marked protected; otherwise SMM access paths are disabled
   and ARR3 write protection is lifted.  ARR usage is force-enabled
   (CCR5 bit 5), and on SMP the final CCR/ARR state is snapshotted for
   replay on secondary CPUs.  ccrc[] records which CCRs were changed so
   the changes can be reported at the end. */
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
    int i;
#endif

    set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */

    /* Save all CCRs locally */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;		/* CCR3 was already read (and modified) by set_mtrr_prepare */
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
    {
	/* SMI_LOCK is set: ARR3 cannot be touched until reset */
	ccrc[3] = 1;
	arr3_protected = 1;
    }
    else
    {
	/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
	 * access to SMM memory through ARR3 (bit 7).
	 */
	if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
	if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
	if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
	arr3_protected = 0;
	if (ccr[6] & 0x02) {
	    ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3 */
	    setCx86 (CX86_CCR6, ccr[6]);
	}
	/* Disable ARR3. This is safe now that we disabled SMM. */
	/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
    }
    /* If we changed CCR1 in memory, change it in the processor, too. */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /* Enable ARR usage by the processor */
    if (!(ccr[5] & 0x20))
    {
	ccr[5] |= 0x20; ccrc[5] = 1;
	setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef CONFIG_SMP
    /* Snapshot the final state for cyrix_arr_init_secondary() */
    for(i=0; i<7; i++) ccr_state[i] = ccr[i];
    for(i=0; i<8; i++)
      cyrix_get_arr(i,
        &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
/* SMM-change reporting, deliberately disabled:
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
} /* End Function cyrix_arr_init */
1909 static void __init centaur_mcr_init(void)
1911 unsigned i;
1912 struct set_mtrr_context ctxt;
1914 set_mtrr_prepare (&ctxt);
1915 /* Unfortunately, MCR's are read-only, so there is no way to
1916 * find out what the bios might have done.
1918 /* Clear all MCR's.
1919 * This way we are sure that the centaur_mcr array contains the actual
1920 * values. The disadvantage is that any BIOS tweaks are thus undone.
1922 for (i = 0; i < 8; ++i)
1924 centaur_mcr[i].high = 0;
1925 centaur_mcr[i].low = 0;
1926 wrmsr (0x110 + i , 0, 0);
1928 /* Throw the main write-combining switch... */
1929 wrmsr (0x120, 0x01f0001f, 0);
1930 set_mtrr_done (&ctxt);
1931 } /* End Function centaur_mcr_init */
/* Probe the boot CPU for an MTRR-style interface and install the matching
   get/set function pointers plus the address-width masks (size_or_mask /
   size_and_mask).  [RETURNS] Non-zero when some interface was found. */
static int __init mtrr_setup(void)
{
    if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Intel (P6) standard MTRRs */
	mtrr_if = MTRR_IF_INTEL;
	get_mtrr = intel_get_mtrr;
	set_mtrr_up = intel_set_mtrr_up;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* The original Athlon docs said that
		   total addressable memory is 44 bits wide.
		   It was not really clear whether its MTRRs
		   follow this or not. (Read: 44 or 36 bits).
		   However, "x86-64_overview.pdf" explicitly
		   states that "previous implementations support
		   36 bit MTRRs" and also provides a way to
		   query the width (in bits) of the physical
		   addressable memory on the Hammer family.
		 */
		if (boot_cpu_data.x86 == 7 && (cpuid_eax(0x80000000) >= 0x80000008)) {
			u32 phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff ;
			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfff00000;
			break;
		}
		/* not a Hammer: fall through to the default 36-bit masks */
	default:
		/* Intel, etc. */
		size_or_mask = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;
	}
    } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Pre-Athlon (K6) AMD CPU MTRRs */
	mtrr_if = MTRR_IF_AMD_K6;
	get_mtrr = amd_get_mtrr;
	set_mtrr_up = amd_set_mtrr_up;
	size_or_mask = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
	/* Cyrix ARRs */
	mtrr_if = MTRR_IF_CYRIX_ARR;
	get_mtrr = cyrix_get_arr;
	set_mtrr_up = cyrix_set_arr_up;
	get_free_region = cyrix_get_free_region;
	cyrix_arr_init();
	size_or_mask = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
	/* Centaur MCRs */
	mtrr_if = MTRR_IF_CENTAUR_MCR;
	get_mtrr = centaur_get_mcr;
	set_mtrr_up = centaur_set_mcr_up;
	centaur_mcr_init();
	size_or_mask = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else {
	/* No supported MTRR interface */
	mtrr_if = MTRR_IF_NONE;
    }

    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
	    "mtrr: detected mtrr type: %s\n",
	    MTRR_VERSION, mtrr_if_name[mtrr_if]);

    return (mtrr_if != MTRR_IF_NONE);
} /* End Function mtrr_setup */
2001 #ifdef CONFIG_SMP
/* Accumulates, across all secondary CPUs, which parts of the MTRR state
   differed from the boot CPU's (reported later by mtrr_state_warn) */
static volatile unsigned long smp_changes_mask __initdata = 0;
/* MTRR state captured from the boot CPU and imposed on the others */
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};
2006 void __init mtrr_init_boot_cpu(void)
2008 if ( !mtrr_setup () )
2009 return;
2011 if ( mtrr_if == MTRR_IF_INTEL ) {
2012 /* Only for Intel MTRRs */
2013 get_mtrr_state (&smp_mtrr_state);
2015 } /* End Function mtrr_init_boot_cpu */
/* Impose the boot CPU's saved MTRR state on this secondary CPU and record
   which parts differed in the global smp_changes_mask. */
static void __init intel_mtrr_init_secondary_cpu(void)
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /* Note that this is not ideal, since the cache is only flushed/disabled
       for this CPU while the MTRRs are changed, but changing this requires
       more invasive changes to the way the kernel boots */
    set_mtrr_prepare (&ctxt);
    /* set_mtrr_state returns a mask of state components that differed */
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);
    /* Use the atomic bitops to update the global mask */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
	if (mask & 0x01) set_bit (count, &smp_changes_mask);
	mask >>= 1;
    }
} /* End Function intel_mtrr_init_secondary_cpu */
/* Per-secondary-CPU initialisation: dispatch to the handler for whatever
   MTRR interface mtrr_setup() detected on the boot CPU. */
void __init mtrr_init_secondary_cpu(void)
{
    switch ( mtrr_if ) {
    case MTRR_IF_INTEL:
	/* Intel (P6) standard MTRRs */
	intel_mtrr_init_secondary_cpu();
	break;
    case MTRR_IF_CYRIX_ARR:
	/* This is _completely theoretical_!
	 * I assume here that one day Cyrix will support Intel APIC.
	 * In reality on non-Intel CPUs we won't even get to this routine.
	 * Hopefully no one will plug two Cyrix processors in a dual P5 board.
	 *  :-)
	 */
	cyrix_arr_init_secondary ();
	break;
    default:
	/* I see no MTRRs I can support in SMP mode... */
	printk ("mtrr: SMP support incomplete for this vendor\n");
    }
} /* End Function mtrr_init_secondary_cpu */
2057 #endif /* CONFIG_SMP */
/* Late init: finish SMP MTRR reconciliation (or do the whole probe on UP),
   then register the /proc and devfs interfaces and allocate the usage
   tables.  [RETURNS] Always 0 (initcall convention). */
int __init mtrr_init(void)
{
#ifdef CONFIG_SMP
    /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Warn if the CPUs did not all agree at boot */
	finalize_mtrr_state (&smp_mtrr_state);
	mtrr_state_warn (smp_changes_mask);
    }
#else
    if ( !mtrr_setup() )
	return 0;		/* MTRRs not supported? */
#endif

#ifdef CONFIG_PROC_FS
    /* NOTE(review): create_proc_entry() can return NULL on failure; the
       dereferences below (and proc_root_mtrr->size in compute_ascii)
       would then oops — worth guarding. */
    proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
    proc_root_mtrr->owner = THIS_MODULE;
    proc_root_mtrr->proc_fops = &mtrr_fops;
#endif
#ifdef CONFIG_DEVFS_FS
    devfs_handle = devfs_register (NULL, "cpu/mtrr", DEVFS_FL_DEFAULT, 0, 0,
				   S_IFREG | S_IRUGO | S_IWUSR,
				   &mtrr_fops, NULL);
#endif
    init_table ();
    return 0;
} /* End Function mtrr_init */
2088 * Local Variables:
2089 * mode:c
2090 * c-file-style:"k&r"
2091 * c-basic-offset:4
2092 * End: