/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000 Richard Gooch
   Copyright (c) 2002 Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692).

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>.
   Source: Intel Architecture Software Developers Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"
u32 num_var_ranges = 0;

unsigned int *usage_table;
static DECLARE_MUTEX(mtrr_sem);

u32 size_or_mask, size_and_mask;

static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops *mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

extern int arr3_protected;
void set_mtrr_ops(struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
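
/*
 * Illustration only: a vendor driver registers itself by handing
 * set_mtrr_ops() a filled-in struct mtrr_ops from its init routine (see
 * amd_init_mtrr() and friends below).  The callbacks named here are the
 * ones this file invokes through mtrr_if; all example_* identifiers are
 * hypothetical.
 */
#if 0
static struct mtrr_ops example_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = example_set,		/* program one range register */
	.set_all           = example_set_all,		/* reprogram complete MTRR state */
	.get               = example_get,		/* read one range register back */
	.get_free_region   = example_get_free_region,
	.validate_add_page = example_validate_add_page,
	.have_wrcomb       = example_have_wrcomb,
};

void __init example_init_mtrr(void)
{
	set_mtrr_ops(&example_mtrr_ops);
}
#endif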
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets < rev 6 have problems with write-combining.
		   Don't allow it and leave room for other chipsets to be tagged. */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/* Intel 450NX errata #23. Non-ascending cacheline evictions to
		   write-combining memory may result in data corruption. */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/* This function sets num_var_ranges to the number of variable MTRRs */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}
static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
	    == NULL) {
		printk(KERN_ERR "mtrr: could not allocate\n");
		return;
	}
	for (i = 0; i < max; i++)
		usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};
static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/* The master has cleared me to execute */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
}
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be set. Once it is,
 * each CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let them
 * take care of it. When they're done, they again decrement data->count and
 * wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes a no-op.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* OK, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 1);

	/* Do our MTRR business */

	/*
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
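
/*
 * Illustration of the rendezvous above; N is num_booting_cpus().
 *
 *	master (set_mtrr)		each other CPU (ipi_handler)
 *	-----------------		----------------------------
 *	count = N-1, gate = 0
 *	smp_call_function()	-->	irq off; count--;
 *	irq off;			spin while gate == 0
 *	spin until count == 0
 *	count = N-1; gate = 1	-->	update MTRRs; count--;
 *	update own MTRRs		spin while gate == 1
 *	spin until count == 0
 *	count = N-1; gate = 0	-->	count--; irq on
 *	spin until count == 0;
 *	irq on
 */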
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (4 KB)
 * @size: Physical size of region in pages (4 KB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
{
	int i;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;

	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	/* Search for an existing MTRR */
	down(&mtrr_sem);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (base >= lbase + lsize)
			continue;
		if ((base < lbase) && (base + size <= lbase))
			continue;
		/* At this point we know there is some kind of overlap/enclosure */
		if ((base < lbase) || (base + size > lbase + lsize)) {
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%x000\n", base, size, lbase, lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (type == MTRR_TYPE_UNCACHABLE)
				continue;
			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, mtrr_attrib_to_str(ltype),
			       mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		usage_table[i] = 1;
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	up(&mtrr_sem);
	unlock_cpu_hotplug();
	return error;
}
static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING
		       "mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG
		       "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
		return -1;
	}
	return 0;
}
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 char increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
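
/*
 * Illustration only: a typical caller is a framebuffer driver mapping its
 * linear aperture write-combined.  The aperture base/size and all example_*
 * identifiers are hypothetical; note the power-of-two size on a matching
 * power-of-two boundary, as documented above.
 */
#if 0
static int example_mtrr_cookie = -1;

static void example_enable_wc(void)
{
	example_mtrr_cookie = mtrr_add(0xd0000000, 0x800000,
				       MTRR_TYPE_WRCOMB, 1);
	if (example_mtrr_cookie < 0)
		/* Not fatal: the device still works, just without WC. */
		printk(KERN_INFO "example: write-combining unavailable\n");
}
#endif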
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	down(&mtrr_sem);
	if (reg < 0) {
		/* Search for an existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n",
			       base, size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	up(&mtrr_sem);
	unlock_cpu_hotplug();
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */

int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
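
/*
 * Illustration only: teardown mirrors the mtrr_add() sketch above.  Since a
 * valid register cookie is supplied, base and size are ignored (see
 * mtrr_del_page); example_mtrr_cookie is the hypothetical value saved from
 * mtrr_add().
 */
#if 0
static void example_disable_wc(void)
{
	if (example_mtrr_cookie >= 0)
		mtrr_del(example_mtrr_cookie, 0, 0);
	example_mtrr_cookie = -1;
}
#endif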
/*
 * These should be called implicitly, but we can't do that until all the
 * initcall stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
}
/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned int	lsize;
};

static struct mtrr_value *mtrr_state;
static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;
	int size = num_var_ranges * sizeof(struct mtrr_value);

	mtrr_state = kmalloc(size, GFP_ATOMIC);
	if (!mtrr_state)
		return -ENOMEM;
	memset(mtrr_state, 0, size);

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}
static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	kfree(mtrr_state);
	return 0;
}
static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	init_ifs();

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;

		/* This is an AMD-specific MSR, but we assume (hope?) that
		   Intel will implement it too when they extend the
		   address bus. */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			u32 phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfff00000;
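			/*
			 * Worked example: with phys_addr == 36 and
			 * PAGE_SHIFT == 12,
			 * size_or_mask  = ~((1 << 24) - 1) = 0xff000000 and
			 * size_and_mask = ~0xff000000 & 0xfff00000
			 *               = 0x00f00000,
			 * i.e. the same values as the 36-bit defaults set
			 * at the top of this function.
			 */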
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family have Intel style MTRRs, but
			   don't support PAE */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
	}
}
void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_sem here to avoid MTRR entries being
	 * changed, but this routine is called at CPU boot time and holding
	 * the lock there breaks it. It is called in two cases:
	 * 1. very early during software resume, when there absolutely are
	 *    no MTRR entry changes;
	 * 2. at CPU hot-add time. There we let mtrr_add/del_page hold the
	 *    cpuhotplug lock to prevent MTRR entry changes.
	 */
	local_irq_save(flags);

	mtrr_if->set_all();

	local_irq_restore(flags);
}
static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;
	if (!use_intel()) {
		/* These CPUs have no generic MTRR support and seem not to
		 * support SMP. They have vendor-specific drivers, and we
		 * use a tricky method to support suspend/resume for them.
		 * TBD: is there any system with such a CPU that supports
		 * suspend/resume? If not, we should remove this code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
				       &mtrr_sysdev_driver);
	}
	return 0;
}
subsys_initcall(mtrr_init_finialize);