/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>
int __read_mostly pat_wc_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
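/*
 * Illustrative: booting with "nopat" on the kernel command line runs the
 * early-param handler above, so "PAT support disabled." is logged and
 * pat_wc_enabled stays 0 for the rest of the boot.
 */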
static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
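/*
 * dprintk() calls are compiled in unconditionally but only emit output
 * when the "debugpat" boot parameter (handled above) has set debug_enable.
 */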
static u64 __read_mostly boot_pat_state;
enum {
	PAT_UC		= 0,	/* uncached */
	PAT_WC		= 1,	/* Write combining */
	PAT_WT		= 4,	/* Write Through */
	PAT_WP		= 5,	/* Write Protected */
	PAT_WB		= 6,	/* Write Back (default) */
	PAT_UC_MINUS	= 7,	/* UC, but can be overridden by MTRR */
};
#define PAT(x,y)	((u64)PAT_ ## y << ((x)*8))
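/*
 * Worked example: PAT(1,WC) evaluates to (u64)1 << 8 == 0x100, i.e. it
 * places the write-combining encoding in byte 1 of the IA32_PAT MSR,
 * which is the PAT entry selected by PTE bits PAT=0, PCD=0, PWT=1.
 */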
void pat_init(void)
{
	u64 pat;

	if (!pat_wc_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *	PAT
	 *	|PCD
	 *	||PWT
	 *	|||
	 *	000 WB		_PAGE_CACHE_WB
	 *	001 WC		_PAGE_CACHE_WC
	 *	010 UC-		_PAGE_CACHE_UC_MINUS
	 *	011 UC		_PAGE_CACHE_UC
	 */
	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);

	/* Read the state once on the boot CPU, before overwriting it */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}
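/*
 * With the layout above, the value written to IA32_PAT works out to
 * 0x0007010600070106: reading bytes least- to most-significant gives
 * WB(06), WC(01), UC-(07), UC(00) for entries 0-3, repeated for 4-7.
 */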
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should be a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64 start;
	u64 end;
	unsigned long type;
	struct list_head nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
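/*
 * Illustrative list state (the addresses are made up): two successful
 * reservations of 0xd0000000-0xd0100000 and 0xd0000000-0xd0080000 with
 * the same type coexist as two sorted entries; freeing one leaves the
 * other in place, which is how overlapping aliases are refcounted.
 */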
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
				unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);

	/*
	 * We return the PAT request directly for types where PAT takes
	 * precedence with respect to MTRR and for UC_MINUS.
	 * Consistency checks with other PAT requests are done later
	 * while going through memtype list.
	 */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
		return 0;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
		return 0;
	} else if (pat_type == _PAGE_CACHE_UC) {
		*ret_prot = prot | _PAGE_CACHE_UC;
		return 0;
	}

	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	mtrr_type = mtrr_type_lookup(start, end);

	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}
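/*
 * Net effect of the intersection above for a WB request:
 *
 *	MTRR says UNCACHABLE	-> resulting PAT type UC
 *	MTRR says WRCOMB	-> resulting PAT type WC
 *	anything else		-> resulting PAT type WB
 *
 * WC, UC and UC_MINUS requests are passed through unchanged.
 */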
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If ret_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If ret_type is non-null, the function will return
 * the available type in ret_type in case of no error. In case of any error
 * it will return a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1) {
				*ret_type = _PAGE_CACHE_WB;
			} else {
				*ret_type = req_type;
			}
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	}

	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);
	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		if (parse->start >= end) {
			dprintk("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}

		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			dprintk("Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}
		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			dprintk("Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}
408 "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
409 start
, end
, cattr_name(new_entry
->type
),
410 cattr_name(req_type
));
412 spin_unlock(&memtype_lock
);
417 /* No conflict. Not yet added to the list. Add to the tail */
418 list_add_tail(&new_entry
->nd
, &memtype_list
);
419 dprintk("New Entry\n");
424 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
425 start
, end
, cattr_name(actual_type
),
426 cattr_name(req_type
), cattr_name(*ret_type
));
429 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
430 start
, end
, cattr_name(actual_type
),
431 cattr_name(req_type
));
434 spin_unlock(&memtype_lock
);
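/*
 * Usage sketch (illustrative; base and len are hypothetical): request WC
 * for a range, accept whatever type an existing overlapping reservation
 * forces, and release the range on teardown:
 *
 *	unsigned long actual;
 *
 *	if (reserve_memtype(base, base + len, _PAGE_CACHE_WC, &actual) < 0)
 *		return -EINVAL;
 *	// map using "actual", which may differ from _PAGE_CACHE_WC
 *	free_memtype(base, base + len);
 */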
int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		return 0;
	}

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
		return 0;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
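/*
 * Note: the lookup above requires an exact (start, end) match, so callers
 * must free precisely the range they reserved; freeing a sub-range logs
 * "freeing invalid memtype" and returns -EINVAL.
 */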
/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
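/*
 * phys_mem_access_prot() intentionally leaves vma_prot untouched; the
 * cacheability decision for /dev/mem is made in
 * phys_mem_access_prot_allowed() below, which can rewrite *vma_prot.
 */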
#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */
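/*
 * The page-by-page walk above rejects the whole mapping as soon as any
 * single pfn in it falls outside the ranges devmem_is_allowed() permits.
 */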
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_wc_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
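/*
 * Return value convention above: 0 denies the mmap; 1 allows it with
 * *vma_prot rewritten to carry the memtype that was actually reserved.
 */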
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}