/* arch/x86/kernel/cpu/mtrr/generic.c */
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"
struct fixed_range_block {
        int base_msr;   /* start address of an MTRR block */
        int ranges;     /* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
        { MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
        { MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
        { MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
        {}
};
static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
/**
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
        u32 lo, hi;

        if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
              (boot_cpu_data.x86 >= 0x0f)))
                return;

        rdmsr(MSR_K8_SYSCFG, lo, hi);
        if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
                printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
                       " not cleared by BIOS, clearing this bit\n",
                       smp_processor_id());
                lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
                mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
        }
}
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
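/*
 * When several variable ranges cover the same address, the effective type
 * follows MTRR precedence: UNCACHABLE beats everything, WRTHROUGH beats
 * WRBACK, and any other combination of differing types is treated as
 * UNCACHABLE (see the loop over num_var_ranges below).
 */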
u8 mtrr_type_lookup(u64 start, u64 end)
{
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;

        if (!mtrr_state_set)
                return 0xFF;

        if (!mtrr_state.enabled)
                return 0xFF;

        /* Make end inclusive instead of exclusive */
        end--;

        /* Look in fixed ranges. Just return the type as per start */
        if (mtrr_state.have_fixed && (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state.fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state.fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state.fixed_ranges[idx];
                }
        }

        /*
         * Look in variable ranges
         * Look for multiple ranges matching this address and pick type
         * as per MTRR precedence
         */
        if (!(mtrr_state.enabled & 2)) {
                return mtrr_state.def_type;
        }

        prev_match = 0xFF;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state;

                if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
                        continue;

                base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
                       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
                       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
                if (start_state != end_state)
                        return 0xFE;

                if ((start & mask) != (base & mask)) {
                        continue;
                }

                curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
                if (prev_match == 0xFF) {
                        prev_match = curr_match;
                        continue;
                }

                if (prev_match == MTRR_TYPE_UNCACHABLE ||
                    curr_match == MTRR_TYPE_UNCACHABLE) {
                        return MTRR_TYPE_UNCACHABLE;
                }

                if ((prev_match == MTRR_TYPE_WRBACK &&
                     curr_match == MTRR_TYPE_WRTHROUGH) ||
                    (prev_match == MTRR_TYPE_WRTHROUGH &&
                     curr_match == MTRR_TYPE_WRBACK)) {
                        prev_match = MTRR_TYPE_WRTHROUGH;
                        curr_match = MTRR_TYPE_WRTHROUGH;
                }

                if (prev_match != curr_match) {
                        return MTRR_TYPE_UNCACHABLE;
                }
        }

        if (mtrr_tom2) {
                if (start >= (1ULL<<32) && (end < mtrr_tom2))
                        return MTRR_TYPE_WRBACK;
        }

        if (prev_match != 0xFF)
                return prev_match;

        return mtrr_state.def_type;
}
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
        struct mtrr_var_range *vr;

        vr = mtrr_state.var_ranges;

        vr[index].base_lo = base_lo;
        vr[index].base_hi = base_hi;
        vr[index].mask_lo = mask_lo;
        vr[index].mask_hi = mask_hi;
}
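
/*
 * get_fixed_ranges() reads the eleven fixed-range MSRs into *frs: each
 * rdmsr() fills two 32-bit words (lo/hi), so p[0..21] receive the 88
 * one-byte types that make up mtrr_state.fixed_ranges.
 */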
static void
get_fixed_ranges(mtrr_type *frs)
{
        unsigned int *p = (unsigned int *) frs;
        int i;

        k8_check_syscfg_dram_mod_en();

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
void mtrr_save_fixed_ranges(void *info)
{
        if (cpu_has_mtrr)
                get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
        unsigned i;

        for (i = 0; i < 8; ++i, ++types, base += step)
                printk(KERN_INFO "  %05X-%05X %s\n",
                       base, base + step - 1, mtrr_attrib_to_str(*types));
}

static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
        unsigned int i;
        int high_width;

        printk(KERN_INFO "MTRR default type: %s\n",
               mtrr_attrib_to_str(mtrr_state.def_type));
        if (mtrr_state.have_fixed) {
                printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
                       mtrr_state.enabled & 1 ? "en" : "dis");
                print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                for (i = 0; i < 2; ++i)
                        print_fixed(0x80000 + i * 0x20000, 0x04000,
                                    mtrr_state.fixed_ranges + (i + 1) * 8);
                for (i = 0; i < 8; ++i)
                        print_fixed(0xC0000 + i * 0x08000, 0x01000,
                                    mtrr_state.fixed_ranges + (i + 3) * 8);
        }
        printk(KERN_INFO "MTRR variable ranges %sabled:\n",
               mtrr_state.enabled & 2 ? "en" : "dis");
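        /*
         * high_width: number of hex digits needed for the physical address
         * bits above bit 31, derived from size_or_mask (which encodes the
         * CPU's physical address width).
         */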
        high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) -
                      (32 - PAGE_SHIFT) + 3) / 4;
        for (i = 0; i < num_var_ranges; ++i) {
                if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
                        printk(KERN_INFO
                               "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
                               i,
                               high_width,
                               mtrr_state.var_ranges[i].base_hi,
                               mtrr_state.var_ranges[i].base_lo >> 12,
                               high_width,
                               mtrr_state.var_ranges[i].mask_hi,
                               mtrr_state.var_ranges[i].mask_lo >> 12,
                               mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
                else
                        printk(KERN_INFO "  %u disabled\n", i);
        }
        if (mtrr_tom2) {
                printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
                       mtrr_tom2, mtrr_tom2>>20);
        }
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;
        unsigned long flags;

        vrs = mtrr_state.var_ranges;

        rdmsr(MTRRcap_MSR, lo, dummy);
        mtrr_state.have_fixed = (lo >> 8) & 1;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        if (mtrr_state.have_fixed)
                get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;

        if (amd_special_default_mtrr()) {
                unsigned low, high;
                /* TOP_MEM2 */
                rdmsr(MSR_K8_TOP_MEM2, low, high);
                mtrr_tom2 = high;
                mtrr_tom2 <<= 32;
                mtrr_tom2 |= low;
                mtrr_tom2 &= 0xffffff800000ULL;
        }

        print_mtrr_state();

        mtrr_state_set = 1;

        /* PAT setup for BP. We need to go through sync steps here */
        local_irq_save(flags);
        prepare_set();

        pat_init();

        post_set();
        local_irq_restore(flags);
}
/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
        if (wrmsr_safe(msr, a, b) < 0)
                printk(KERN_ERR
                        "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
        unsigned lo, hi;

        rdmsr(msr, lo, hi);

        if (lo != msrwords[0] || hi != msrwords[1]) {
                mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
                *changed = true;
        }
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;

        max = num_var_ranges;
        if (replace_reg >= 0 && replace_reg < max)
                return replace_reg;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}
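
/*
 * generic_get_mtrr() reconstructs base, size and type from the
 * MTRRphysBase/MTRRphysMask MSR pair.  A valid mask has all bits set above
 * the range's size, so negating the page-granular mask (*size = -mask_lo)
 * yields the size in pages, provided the range is a contiguous power of two.
 */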
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;
        unsigned int tmp, hi;
        int cpu;

        /*
         * get_mtrr doesn't need to update mtrr_state, also it could be called
         * from any cpu, so try to print it out directly.
         */
        cpu = get_cpu();

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                goto out_put_cpu;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask: */
        tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
        mask_lo = size_or_mask | tmp;

        /* Expand tmp with high bits to all 1s: */
        hi = fls(tmp);
        if (hi > 0) {
                tmp |= ~((1<<(hi - 1)) - 1);

                if (tmp != mask_lo) {
                        WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
                        mask_lo = tmp;
                }
        }

        /*
         * This works correctly if size is a power of two, i.e. a
         * contiguous range:
         */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;

        printk(KERN_DEBUG "  get_mtrr: cpu%d reg%02d base=%010lx size=%010lx %s\n",
               cpu, reg, *base, *size,
               mtrr_attrib_to_str(*type & 0xff));

out_put_cpu:
        put_cpu();
}
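
/*
 * set_fixed_ranges() walks fixed_range_blocks[] and treats the saved
 * fixed-range types as an array of 64-bit MSR images, handing each one to
 * set_fixed_range() as a lo/hi word pair.
 */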
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
        unsigned long long *saved = (unsigned long long *) frs;
        bool changed = false;
        int block = -1, range;

        k8_check_syscfg_dram_mod_en();

        while (fixed_range_blocks[++block].ranges)
                for (range = 0; range < fixed_range_blocks[block].ranges; range++)
                        set_fixed_range(fixed_range_blocks[block].base_msr + range,
                                        &changed, (unsigned int *) saved++);

        return changed;
}
/* Set the MSR pair relating to a var range. Returns true if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        bool changed = false;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = true;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);

        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = true;
        }

        return changed;
}
static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /* Set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value: */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
                             (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}
static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain.  The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
        unsigned long cr0;

        /*
         * Note that this is not ideal, since the cache is only flushed/disabled
         * for this CPU while the MTRRs are changed, but changing this requires
         * more invasive changes to the way the kernel boots
         */
        spin_lock(&set_atomicity_lock);

        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | X86_CR0_CD;
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
        /* Flush TLBs (no need to flush caches - they are disabled) */
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
        unsigned long mask, count;
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state();

        /* also set PAT */
        pat_init();

        post_set();
        local_irq_restore(flags);

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
}
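
/*
 * The MSR encoding used below: MTRRphysBase holds the page-aligned base
 * plus the memory type in its low byte, MTRRphysMask holds ~(size - 1)
 * (page-granular) with bit 11 set to mark the range valid.
 */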
static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
        unsigned long flags;
        struct mtrr_var_range *vr;

        vr = &mtrr_state.var_ranges[reg];

        local_irq_save(flags);
        prepare_set();

        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
                mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
                memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                vr->base_lo = base << PAGE_SHIFT | type;
                vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
                vr->mask_lo = -size << PAGE_SHIFT | 0x800;
                vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

                mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }

        post_set();
        local_irq_restore(flags);
}
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /* For Intel PPro stepping <= 7, must be 4 MiB aligned
           and not touch 0x70000000->0x7003FFFF */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        /* Check upper bits of base and last are equal and lower bits are 0
           for base and 1 for last */
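        /*
         * Example (values are in 4 KiB pages): base 0x100 with size 0x100
         * gives last = 0x1ff; shifting out the trailing 0s of base and 1s of
         * last leaves lbase == last, so the range is properly aligned.
         * base 0x180 with size 0x100 would fail the check below.
         */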
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1)
                ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                       base, size);
                return -EINVAL;
        }
        return 0;
}
static int generic_have_wrcomb(void)
{
        unsigned long config, dummy;

        /* MTRRcap bit 10 indicates write-combining support */
        rdmsr(MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
        return 1;
}
/* Generic MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if      = 1,
        .set_all           = generic_set_all,
        .get               = generic_get_mtrr,
        .get_free_region   = generic_get_free_region,
        .set               = generic_set_mtrr,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = generic_have_wrcomb,
};