/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002       Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692)

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"
u32 num_var_ranges = 0;

unsigned int *usage_table;
static DEFINE_MUTEX(mtrr_mutex);

u32 size_or_mask, size_and_mask;

static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;
static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type);

extern int arr3_protected;
void set_mtrr_ops(struct mtrr_ops * ops)
{
        if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
                mtrr_ops[ops->vendor] = ops;
}
/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
        struct pci_dev *dev;
        u8 rev;

        if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
                /* ServerWorks LE chipsets < rev 6 have problems with write-combining.
                   Don't allow it and leave room for other chipsets to be tagged */
                if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
                    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
                        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
                        if (rev <= 5) {
                                printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
                                pci_dev_put(dev);
                                return 0;
                        }
                }
                /* Intel 450NX errata # 23. Non-ascending cacheline evictions to
                   write-combining memory may result in data corruption */
                if (dev->vendor == PCI_VENDOR_ID_INTEL &&
                    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
                        printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
                        pci_dev_put(dev);
                        return 0;
                }
                pci_dev_put(dev);
        }
        return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/*  This function returns the number of variable MTRRs  */
static void __init set_num_var_ranges(void)
{
        unsigned long config = 0, dummy;

        if (use_intel()) {
                rdmsr(MTRRcap_MSR, config, dummy);
        } else if (is_cpu(AMD))
                config = 2;
        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
                config = 8;
        num_var_ranges = config & 0xff;
}
static void __init init_table(void)
{
        int i, max;

        max = num_var_ranges;
        if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
            == NULL) {
                printk(KERN_ERR "mtrr: could not allocate\n");
                return;
        }
        for (i = 0; i < max; i++)
                usage_table[i] = 1;
}
struct set_mtrr_data {
        atomic_t        count;
        atomic_t        gate;
        unsigned long   smp_base;
        unsigned long   smp_size;
        unsigned int    smp_reg;
        mtrr_type       smp_type;
};
static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
        struct set_mtrr_data *data = info;
        unsigned long flags;

        local_irq_save(flags);

        atomic_dec(&data->count);
        while (!atomic_read(&data->gate))
                cpu_relax();

        /*  The master has cleared me to execute  */
        if (data->smp_reg != ~0U)
                mtrr_if->set(data->smp_reg, data->smp_base,
                             data->smp_size, data->smp_type);
        else
                mtrr_if->set_all();

        atomic_dec(&data->count);
        while (atomic_read(&data->gate))
                cpu_relax();

        atomic_dec(&data->count);
        local_irq_restore(flags);
}
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:  mtrr in question
 * @base: mtrr base
 * @size: mtrr size
 * @type: mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it decrements it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be set. Once it is,
 * each CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let them
 * take care of it. When they're done, they again decrement data->count and
 * wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * simply becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type)
{
        struct set_mtrr_data data;
        unsigned long flags;

        data.smp_reg = reg;
        data.smp_base = base;
        data.smp_size = size;
        data.smp_type = type;
        atomic_set(&data.count, num_booting_cpus() - 1);
        atomic_set(&data.gate, 0);

        /*  Start the ball rolling on other CPUs  */
        if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");

        local_irq_save(flags);

        while (atomic_read(&data.count))
                cpu_relax();

        /* ok, reset count and toggle gate */
        atomic_set(&data.count, num_booting_cpus() - 1);
        atomic_set(&data.gate, 1);

        /* do our MTRR business */

        /* HACK!
         * We use this same function to initialize the mtrrs on boot.
         * The state of the boot cpu's mtrrs has been saved, and we want
         * to replicate that across all the APs.
         * If we're doing that, @reg is set to something special...
         */
        if (reg != ~0U)
                mtrr_if->set(reg, base, size, type);

        /* wait for the others */
        while (atomic_read(&data.count))
                cpu_relax();

        atomic_set(&data.count, num_booting_cpus() - 1);
        atomic_set(&data.gate, 0);

        /*
         * Wait here for everyone to have seen the gate change,
         * so we're the last ones to touch 'data'
         */
        while (atomic_read(&data.count))
                cpu_relax();

        local_irq_restore(flags);
}
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that
 * an MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
                  unsigned int type, char increment)
{
        int i;
        mtrr_type ltype;
        unsigned long lbase;
        unsigned int lsize;
        int error;

        if (!mtrr_if)
                return -ENXIO;

        if ((error = mtrr_if->validate_add_page(base, size, type)))
                return error;

        if (type >= MTRR_NUM_TYPES) {
                printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
                return -EINVAL;
        }

        /*  If the type is WC, check that this processor supports it  */
        if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
                printk(KERN_WARNING
                       "mtrr: your processor doesn't support write-combining\n");
                return -ENOSYS;
        }

        if (base & size_or_mask || size & size_or_mask) {
                printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
                return -EINVAL;
        }

        error = -EINVAL;

        /* No CPU hotplug when we change MTRR entries */
        lock_cpu_hotplug();

        /*  Search for existing MTRR  */
        mutex_lock(&mtrr_mutex);
        for (i = 0; i < num_var_ranges; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (base >= lbase + lsize)
                        continue;
                if ((base < lbase) && (base + size <= lbase))
                        continue;
                /*  At this point we know there is some kind of overlap/enclosure  */
                if ((base < lbase) || (base + size > lbase + lsize)) {
                        printk(KERN_WARNING
                               "mtrr: 0x%lx000,0x%lx000 overlaps existing"
                               " 0x%lx000,0x%x000\n", base, size, lbase,
                               lsize);
                        goto out;
                }
                /*  New region is enclosed by an existing region  */
                if (ltype != type) {
                        if (type == MTRR_TYPE_UNCACHABLE)
                                continue;
                        printk(KERN_WARNING
                               "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                               base, size, mtrr_attrib_to_str(ltype),
                               mtrr_attrib_to_str(type));
                        goto out;
                }
                if (increment)
                        ++usage_table[i];
                error = i;
                goto out;
        }
        /*  Search for an empty MTRR  */
        i = mtrr_if->get_free_region(base, size);
        if (i >= 0) {
                set_mtrr(i, base, size, type);
                usage_table[i] = 1;
        } else
                printk(KERN_INFO "mtrr: no more MTRRs available\n");
        error = i;
 out:
        mutex_unlock(&mtrr_mutex);
        unlock_cpu_hotplug();
        return error;
}
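/*
 * A brief usage sketch (hypothetical, not part of this driver): mtrr_add_page
 * works in 4 kB page units, i.e. base and size are byte values shifted right
 * by PAGE_SHIFT (12). Requesting write-combining for a 4 MB aperture at
 * physical 0xd0000000 therefore looks like this:
 */
#if 0
static int example_wc_pages(void)
{
        /* 0xd0000000 >> 12 = 0xd0000 pages; 0x400000 (4 MB) >> 12 = 0x400 pages */
        int reg = mtrr_add_page(0xd0000, 0x400, MTRR_TYPE_WRCOMB, 1);

        if (reg < 0)
                printk(KERN_INFO "example: mtrr_add_page failed: %d\n", reg);
        return reg;
}
#endif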
static int mtrr_check(unsigned long base, unsigned long size)
{
        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
                printk(KERN_WARNING
                       "mtrr: size and base must be multiples of 4 kiB\n");
                printk(KERN_DEBUG
                       "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
                dump_stack();
                return -1;
        }
        return 0;
}
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that
 * an MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
         char increment)
{
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
                             increment);
}
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase;
        unsigned int lsize;
        int error = -EINVAL;

        if (!mtrr_if)
                return -ENXIO;

        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
        lock_cpu_hotplug();
        mutex_lock(&mtrr_mutex);
        if (reg < 0) {
                /*  Search for existing MTRR  */
                for (i = 0; i < max; ++i) {
                        mtrr_if->get(i, &lbase, &lsize, &ltype);
                        if (lbase == base && lsize == size) {
                                reg = i;
                                break;
                        }
                }
                if (reg < 0) {
                        printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
                               size);
                        goto out;
                }
        }
        if (reg >= max) {
                printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
                goto out;
        }
        if (is_cpu(CYRIX) && !use_intel()) {
                if ((reg == 3) && arr3_protected) {
                        printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
                        goto out;
                }
        }
        mtrr_if->get(reg, &lbase, &lsize, &ltype);
        if (lsize < 1) {
                printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
                goto out;
        }
        if (usage_table[reg] < 1) {
                printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
                goto out;
        }
        if (--usage_table[reg] < 1)
                set_mtrr(reg, 0, 0, 0);
        error = reg;
 out:
        mutex_unlock(&mtrr_mutex);
        unlock_cpu_hotplug();
        return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
/*
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
        amd_init_mtrr();
        cyrix_init_mtrr();
        centaur_init_mtrr();
}
/* The suspend/resume methods are only for CPUs without MTRRs; CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
        mtrr_type       ltype;
        unsigned long   lbase;
        unsigned int    lsize;
};

static struct mtrr_value * mtrr_state;
static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
{
        int i;
        int size = num_var_ranges * sizeof(struct mtrr_value);

        mtrr_state = kmalloc(size, GFP_ATOMIC);
        if (!mtrr_state)
                return -ENOMEM;
        memset(mtrr_state, 0, size);

        for (i = 0; i < num_var_ranges; i++) {
                mtrr_if->get(i,
                             &mtrr_state[i].lbase,
                             &mtrr_state[i].lsize,
                             &mtrr_state[i].ltype);
        }
        return 0;
}
static int mtrr_restore(struct sys_device * sysdev)
{
        int i;

        for (i = 0; i < num_var_ranges; i++) {
                if (mtrr_state[i].lsize)
                        set_mtrr(i,
                                 mtrr_state[i].lbase,
                                 mtrr_state[i].lsize,
                                 mtrr_state[i].ltype);
        }
        kfree(mtrr_state);
        return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
        .suspend        = mtrr_save,
        .resume         = mtrr_restore,
};
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
        init_ifs();

        if (cpu_has_mtrr) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = 0xff000000;      /* 36 bits */
                size_and_mask = 0x00f00000;

                /* This is an AMD specific MSR, but we assume(hope?) that
                   Intel will implement it too when they extend the address
                   bus of the Xeon. */
                if (cpuid_eax(0x80000000) >= 0x80000008) {
                        u32 phys_addr;
                        phys_addr = cpuid_eax(0x80000008) & 0xff;
                        /* CPUID workaround for Intel 0F33/0F34 CPU */
                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                            boot_cpu_data.x86 == 0xF &&
                            boot_cpu_data.x86_model == 0x3 &&
                            (boot_cpu_data.x86_mask == 0x3 ||
                             boot_cpu_data.x86_mask == 0x4))
                                phys_addr = 36;

                        size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
                        size_and_mask = ~size_or_mask & 0xfff00000;
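                        /*
                         * Worked example (illustrative): a CPU reporting 36
                         * physical address bits gives phys_addr = 36, so with
                         * PAGE_SHIFT = 12 we get
                         *   size_or_mask  = ~((1 << 24) - 1) = 0xff000000
                         *   size_and_mask = ~0xff000000 & 0xfff00000 = 0x00f00000
                         * matching the 36-bit defaults set above. Any
                         * page-unit base or size with bits in size_or_mask
                         * would lie beyond 2^36 bytes and is rejected by
                         * mtrr_add_page().
                         */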
                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                           boot_cpu_data.x86 == 6) {
                        /* VIA C* family have Intel style MTRRs, but
                           don't support PAE */
                        size_or_mask = 0xfff00000;      /* 32 bits */
                        size_and_mask = 0;
                }
        } else {
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_AMD:
                        if (cpu_has_k6_mtrr) {
                                /* Pre-Athlon (K6) AMD CPU MTRRs */
                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CENTAUR:
                        if (cpu_has_centaur_mcr) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CYRIX:
                        if (cpu_has_cyrix_arr) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                default:
                        break;
                }
        }

        if (mtrr_if) {
                set_num_var_ranges();
                init_table();
        }
}
void mtrr_ap_init(void)
{
        unsigned long flags;

        if (!mtrr_if || !use_intel())
                return;
        /*
         * Ideally we should hold mtrr_mutex here to prevent MTRR entries
         * from being changed, but this routine is called at CPU boot time
         * and holding the lock there would break things. The routine is
         * called in two cases: 1. very early in software resume, when there
         * are absolutely no MTRR entry changes; 2. at CPU hot-add time. We
         * let mtrr_add/del_page hold the CPU hotplug lock to prevent MTRR
         * entry changes.
         */
        local_irq_save(flags);

        mtrr_if->set_all();

        local_irq_restore(flags);
}
static int __init mtrr_init_finialize(void)
{
        if (!mtrr_if)
                return 0;
        if (use_intel())
                mtrr_state_warn();
        else {
                /* These CPUs have no MTRRs and seem not to support SMP. They
                 * have vendor-specific drivers; we use a tricky method to
                 * support suspend/resume for them.
                 * TBD: is there any system with such a CPU that supports
                 * suspend/resume? If not, we should remove this code.
                 */
                sysdev_driver_register(&cpu_sysdev_class,
                                       &mtrr_sysdev_driver);
        }
        return 0;
}
subsys_initcall(mtrr_init_finialize);