/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>
int pat_wc_enabled = 1;

static u64 __read_mostly boot_pat_state;

static int nopat(char *str)
{
        pat_wc_enabled = 0;
        printk(KERN_INFO "x86: PAT support disabled.\n");

        return 0;
}
early_param("nopat", nopat);
static int pat_known_cpu(void)
{
        if (!pat_wc_enabled)
                return 0;

        if (cpu_has_pat)
                return 1;

        pat_wc_enabled = 0;
        printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
        return 0;
}
enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x) * 8))
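/*
 * For illustration: PAT(1, WC) expands to (u64)PAT_WC << 8 == 0x0100,
 * placing the WC encoding (1) in byte 1 of the IA32_PAT MSR. Byte n of
 * the MSR is PAT entry n, which a PTE selects through its PAT/PCD/PWT
 * bits (entry 1 corresponds to PWT=1, PCD=0, PAT=0).
 */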
void pat_init(void)
{
        u64 pat;

#ifndef CONFIG_X86_PAT
        nopat(NULL);
#endif

        /* Boot CPU enables PAT based on CPU feature */
        if (!smp_processor_id() && !pat_known_cpu())
                return;

        /* APs enable PAT iff boot CPU has enabled it before */
        if (smp_processor_id() && !pat_wc_enabled)
                return;

        /*
         * Set PWT to Write-Combining. All other bits stay the same.
         *
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU saves the original PAT value for reference */
        if (!smp_processor_id())
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
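/*
 * Worked example of the encoding above (an illustrative sketch, not a
 * helper used in this file): a 4K PTE carries PWT in bit 3, PCD in bit 4
 * and PAT in bit 7, so the PAT entry a PTE selects can be computed as
 *
 *      static inline unsigned int pte_pat_index(unsigned long pte)
 *      {
 *              return ((pte >> 3) & 3) | ((pte >> 5) & 4);
 *      }
 *
 * e.g. PCD=1, PWT=0 yields entry 2, which pat_init() set to UC_MINUS.
 */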
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}
/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
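/*
 * Example of the reference counting described above: two overlapping WC
 * reservations of 0xd0000000-0xd0100000 leave two identical entries on
 * the list; each free_memtype() of that range removes exactly one, so
 * the attribute survives until the last user is gone. (Addresses are
 * made up for illustration.)
 */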
/*
 * Computes the intersection of the PAT memory type and the MTRR memory
 * type and returns the result as PAT understands it.
 * (Types in pat and mtrr do not have the same values.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM vol 3a.
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
                           unsigned long *ret_prot)
{
        unsigned long pat_type;
        u8 mtrr_type;

        mtrr_type = mtrr_type_lookup(start, end);
        if (mtrr_type == 0xFF) {                /* MTRR not enabled */
                *ret_prot = prot;
                return 0;
        }
        if (mtrr_type == 0xFE) {                /* MTRR match error */
                *ret_prot = _PAGE_CACHE_UC;
                return -1;
        }
        if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
            mtrr_type != MTRR_TYPE_WRBACK &&
            mtrr_type != MTRR_TYPE_WRCOMB) {    /* MTRR type unhandled */
                *ret_prot = _PAGE_CACHE_UC;
                return -1;
        }

        pat_type = prot & _PAGE_CACHE_MASK;
        prot &= ~_PAGE_CACHE_MASK;

        /* Currently doing the intersection by hand. Optimize it later. */
        if (pat_type == _PAGE_CACHE_WC) {
                *ret_prot = prot | _PAGE_CACHE_WC;
        } else if (pat_type == _PAGE_CACHE_UC_MINUS) {
                *ret_prot = prot | _PAGE_CACHE_UC_MINUS;
        } else if (pat_type == _PAGE_CACHE_UC ||
                   mtrr_type == MTRR_TYPE_UNCACHABLE) {
                *ret_prot = prot | _PAGE_CACHE_UC;
        } else if (mtrr_type == MTRR_TYPE_WRCOMB) {
                *ret_prot = prot | _PAGE_CACHE_WC;
        } else {
                *ret_prot = prot | _PAGE_CACHE_WB;
        }

        return 0;
}
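/*
 * Summary of the intersection implemented above (request x MTRR):
 *
 *      request WC              -> WC, whatever the MTRR says
 *      request UC-             -> UC-
 *      request UC, or MTRR UC  -> UC
 *      request WB, MTRR WC     -> WC
 *      request WB, MTRR WB     -> WB
 */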
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type has the special case value '-1' when the requester wants to
 * inherit the memory type from mtrr (if WB) or an existing PAT mapping,
 * defaulting to UC_MINUS.
 *
 * If ret_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If ret_type is non-null, the function returns
 * the available type in ret_type in case of no error. In case of any error
 * it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *ret_type)
{
        struct memtype *new_entry = NULL;
        struct memtype *parse;
        unsigned long actual_type;
        int err = 0;

        /* Only track when pat_wc_enabled */
        if (!pat_wc_enabled) {
                /* This is identical to page table setting without PAT */
                if (ret_type) {
                        if (req_type == -1)
                                *ret_type = _PAGE_CACHE_WB;
                        else
                                *ret_type = req_type;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
                if (ret_type)
                        *ret_type = _PAGE_CACHE_WB;

                return 0;
        }
        if (req_type == -1) {
                /*
                 * Special case where the caller wants to inherit from mtrr or
                 * an existing pat mapping, defaulting to UC_MINUS in case of
                 * no match.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == 0xFE)          /* MTRR match error */
                        err = -1;

                if (mtrr_type == MTRR_TYPE_WRBACK) {
                        req_type = _PAGE_CACHE_WB;
                        actual_type = _PAGE_CACHE_WB;
                } else {
                        req_type = _PAGE_CACHE_UC_MINUS;
                        actual_type = _PAGE_CACHE_UC_MINUS;
                }
        } else {
                req_type &= _PAGE_CACHE_MASK;
                err = pat_x_mtrr_type(start, end, req_type, &actual_type);
        }

        if (err) {
                if (ret_type)
                        *ret_type = actual_type;

                return -EINVAL;
        }
        new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;

        new_entry->start = start;
        new_entry->end = end;
        new_entry->type = actual_type;

        if (ret_type)
                *ret_type = actual_type;

        spin_lock(&memtype_lock);
        /* Search for an existing mapping that overlaps the current range */
        list_for_each_entry(parse, &memtype_list, nd) {
                struct memtype *saved_ptr;

                if (parse->start >= end) {
                        pr_debug("New Entry\n");
                        list_add(&new_entry->nd, parse->nd.prev);
                        new_entry = NULL;
                        break;
                }

                if (start <= parse->start && end >= parse->start) {
                        if (actual_type != parse->type && ret_type) {
                                actual_type = parse->type;
                                *ret_type = actual_type;
                                new_entry->type = actual_type;
                        }

                        if (actual_type != parse->type) {
                                printk(KERN_INFO
                "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                                        current->comm, current->pid,
                                        start, end,
                                        cattr_name(actual_type),
                                        cattr_name(parse->type));
                                err = -EBUSY;
                                break;
                        }

                        saved_ptr = parse;
                        /*
                         * Check whether the request overlaps more
                         * than one entry in the list.
                         */
                        list_for_each_entry_continue(parse, &memtype_list, nd) {
                                if (end <= parse->start)
                                        break;

                                if (actual_type != parse->type) {
                                        printk(KERN_INFO
                "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                                                current->comm, current->pid,
                                                start, end,
                                                cattr_name(actual_type),
                                                cattr_name(parse->type));
                                        err = -EBUSY;
                                        break;
                                }
                        }

                        if (err)
                                break;

                        pr_debug("Overlap at 0x%Lx-0x%Lx\n",
                                 saved_ptr->start, saved_ptr->end);
                        /* No conflict. Go ahead and add this new entry */
                        list_add(&new_entry->nd, saved_ptr->nd.prev);
                        new_entry = NULL;
                        break;
                }
                if (start < parse->end) {
                        if (actual_type != parse->type && ret_type) {
                                actual_type = parse->type;
                                *ret_type = actual_type;
                                new_entry->type = actual_type;
                        }

                        if (actual_type != parse->type) {
                                printk(KERN_INFO
                "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                                        current->comm, current->pid,
                                        start, end,
                                        cattr_name(actual_type),
                                        cattr_name(parse->type));
                                err = -EBUSY;
                                break;
                        }

                        saved_ptr = parse;
                        /*
                         * Check whether the request overlaps more
                         * than one entry in the list.
                         */
                        list_for_each_entry_continue(parse, &memtype_list, nd) {
                                if (end <= parse->start)
                                        break;

                                if (actual_type != parse->type) {
                                        printk(KERN_INFO
                "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                                                current->comm, current->pid,
                                                start, end,
                                                cattr_name(actual_type),
                                                cattr_name(parse->type));
                                        err = -EBUSY;
                                        break;
                                }
                        }

                        if (err)
                                break;

                        printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
                               saved_ptr->start, saved_ptr->end);
                        /* No conflict. Go ahead and add this new entry */
                        list_add(&new_entry->nd, &saved_ptr->nd);
                        new_entry = NULL;
                        break;
                }
        }
400 "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
401 start
, end
, cattr_name(new_entry
->type
),
402 cattr_name(req_type
));
404 spin_unlock(&memtype_lock
);
409 /* No conflict. Not yet added to the list. Add to the tail */
410 list_add_tail(&new_entry
->nd
, &memtype_list
);
411 pr_debug("New Entry\n");
416 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
417 start
, end
, cattr_name(actual_type
),
418 cattr_name(req_type
), cattr_name(*ret_type
));
421 "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
422 start
, end
, cattr_name(actual_type
),
423 cattr_name(req_type
));
426 spin_unlock(&memtype_lock
);
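/*
 * Typical usage, as a hedged sketch (not a call site in this file;
 * 'phys' and 'size' are made-up names):
 *
 *      unsigned long got;
 *
 *      if (reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &got))
 *              return -EBUSY;          // conflicting alias exists
 *      // 'got' may be weaker than WC, e.g. UC- when the MTRRs forbid WC
 *      ...
 *      free_memtype(phys, phys + size);        // drop the reservation
 */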
int free_memtype(u64 start, u64 end)
{
        struct memtype *ml;
        int err = -EINVAL;

        /* Only track when pat_wc_enabled */
        if (!pat_wc_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS)
                return 0;

        spin_lock(&memtype_lock);
        list_for_each_entry(ml, &memtype_list, nd) {
                if (ml->start == start && ml->end == end) {
                        list_del(&ml->nd);
                        kfree(ml);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                        current->comm, current->pid, start, end);
        }

        pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}
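/*
 * Note that free_memtype() deletes exactly one matching entry per call;
 * combined with reserve_memtype() adding one entry per overlapping
 * reservation, the duplicate list entries behave as a reference count
 * on the range.
 */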
/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = _PAGE_CACHE_UC_MINUS;
        unsigned long ret_flags;
        int retval;

        if (file->f_flags & O_SYNC)
                flags = _PAGE_CACHE_UC;
#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_wc_enabled &&
            !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
              test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
              test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
              test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif
        /*
         * With O_SYNC, we can only take a UC mapping. Fail if we cannot.
         * Without O_SYNC, we want to get:
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != _PAGE_CACHE_UC_MINUS) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
                if (!retval)
                        flags = ret_flags;      /* possibly inherited type */
        }

        if (retval < 0)
                return 0;

        if (pfn <= max_pfn_mapped &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        offset, offset + size);
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}
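/*
 * For context (as best understood, not stated in this file): the
 * /dev/mem driver in drivers/char/mem.c calls
 * phys_mem_access_prot_allowed() from its mmap handler and rejects the
 * mapping when it returns 0, so a failed memtype reservation here
 * surfaces to userspace as a failed mmap().
 */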
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, addr + size,
                        cattr_name(flags));
        }
}
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}
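/*
 * Illustrative pairing (a sketch, not a call site in this file): users
 * of these two hooks bracket the lifetime of a raw mapping, with the
 * cache bits of vma_prot supplying want_flags above:
 *
 *      map_devmem(pfn, size, prot);    // reserve, warn on type mismatch
 *      ...
 *      unmap_devmem(pfn, size, prot);  // drop the reservation
 */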