/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
}
#endif
int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
static u64 __read_mostly boot_pat_state;
enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
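/*
 * Editor's illustrative note (not in the original source): PAT(x, y)
 * shifts the PAT_* encoding of cache type y into byte x of the 64-bit
 * IA32_PAT MSR image, e.g.
 *
 *	PAT(1, WC)       == (u64)1 << 8   == 0x0000000000000100
 *	PAT(2, UC_MINUS) == (u64)7 << 16  == 0x0000000000070000
 */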
void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		}

		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}
	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
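	/*
	 * Editor's note (not in the original source): with the entries
	 * above this initializer evaluates to 0x0007010600070106, i.e.
	 * entries 0-3 and 4-7 are each programmed to WB, WC, UC-, UC.
	 */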
	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have the same value.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
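/*
 * Editor's illustrative example (not in the original source): if a caller
 * asks for _PAGE_CACHE_WB over a range that the MTRRs mark as anything
 * other than write-back, the function above returns _PAGE_CACHE_UC_MINUS,
 * so a WB request can never end up more cacheable than the MTRRs allow.
 * Requests other than WB are passed through unchanged.
 */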
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/...).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
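/*
 * Editor's note (not in the original source): the function above is
 * effectively tri-state - it returns 1 when every page in the range is
 * RAM, 0 when none of it is (which includes the low ISA hole), and -1 as
 * soon as it sees a mix of RAM and non-RAM pages.
 */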
/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}
	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}

	return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}

	return 0;
}
/*
 * req_type typically has one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}
	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;

		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;
	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
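/*
 * Editor's illustrative usage sketch (not in the original source); the
 * identifiers phys/len/actual below are hypothetical:
 *
 *	unsigned long actual;
 *
 *	if (reserve_memtype(phys, phys + len, _PAGE_CACHE_WC, &actual))
 *		return -EBUSY;
 *	...
 *	free_memtype(phys, phys + len);
 *
 * "actual" may legitimately differ from the requested type (for instance
 * when an overlapping reservation already exists), so callers such as
 * io_reserve_memtype() below check it with is_new_memtype_allowed()
 * before using the range.
 */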
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}
	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return 0;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
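/*
 * Editor's note (not in the original source): io_reserve_memtype() and
 * io_free_memtype() are intended to be used as a pair; a hypothetical
 * caller looks roughly like
 *
 *	unsigned long type = _PAGE_CACHE_WC;
 *
 *	if (io_reserve_memtype(res_start, res_end, &type))
 *		return NULL;
 *	...
 *	io_free_memtype(res_start, res_end);
 *
 * where res_start/res_end are placeholders and "type" may come back as a
 * different but compatible cache attribute.
 */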
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;
#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;
	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}
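/*
 * Editor's worked example (not in the original source): for a request that
 * starts below high_memory but extends past it, only the identity-mapped
 * prefix is converted. E.g. with __pa(high_memory) == 0x38000000,
 * base == 0x37ff0000 and size == 0x20000, id_sz is clamped to 0x10000 and
 * only that first 64 KiB of the direct mapping has its attribute changed.
 */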
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype() it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;
		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}
	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
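/*
 * Editor's note (not in the original source): the last argument of
 * reserve_pfn_range() selects strict behaviour. track_pfn_vma_copy()
 * below passes 1 (the copied mapping must keep the exact type), while
 * track_pfn_vma_new() passes 0 and accepts any compatible type that
 * is_new_memtype_allowed() permits.
 */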
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range() call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
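/*
 * Editor's illustrative usage sketch (not in the original source): a
 * driver exposing a write-combining BAR to userspace would typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * When PAT is disabled the helper above silently falls back to an
 * uncached mapping.
 */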
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}
static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};
static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}
static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR,
			    arch_debugfs_dir, NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
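/*
 * Editor's note (not in the original source): with debugfs mounted, the
 * current reservations can be inspected from userspace, e.g.
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *
 * (the sample entry is hypothetical; the file is created under
 * arch_debugfs_dir, i.e. the "x86" debugfs directory).
 */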
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */