/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif
static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;
enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))
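
/*
 * For example, PAT(1, WC) expands to ((u64)1 << 8): it places the
 * write-combining encoding (1) in byte 1, i.e. PAT entry 1, of the
 * value programmed into the PAT MSR below.
 */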
void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
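
/*
 * With the entries chosen above, the value written to MSR_IA32_CR_PAT
 * works out to 0x0007010600070106 (bytes, low to high: WB, WC, UC-, UC,
 * then the same four repeated).
 */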
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
        struct rb_node          rb;
};
static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = container_of(node, struct memtype, rb);

                if (data->start < start) {
                        last_lower = data;
                        node = node->rb_right;
                } else if (data->start > start) {
                        node = node->rb_left;
                } else
                        return data;
        }

        /* Will return NULL if there is no entry with its start <= start */
        return last_lower;
}
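
/*
 * Example: with entries starting at 0x1000 and 0x3000, a search for
 * 0x2000 returns the 0x1000 entry (the closest start at or below the
 * requested address), while a search for 0x500 returns NULL.
 */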
static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*new) {
                struct memtype *this = container_of(*new, struct memtype, rb);

                parent = *new;
                if (data->start <= this->start)
                        new = &((*new)->rb_left);
                else if (data->start > this->start)
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->rb, parent, new);
        rb_insert_color(&data->rb, root);
}
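
/*
 * Note that entries with equal start addresses descend to the left, so
 * the multiple reference-counted aliases for one range described above
 * can coexist in the tree.
 */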
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type values used by PAT and MTRR do not share an encoding.)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}
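
/*
 * Example: a _PAGE_CACHE_WB request over a range an MTRR marks as
 * uncacheable comes back as _PAGE_CACHE_UC_MINUS; any non-WB request is
 * passed through unchanged.
 */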
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, physical address range in the legacy ISA
                 * region is tracked as non-RAM. This will allow users of
                 * /dev/mem to map portions of legacy ISA region, even when
                 * some of those portions are listed (or not even listed) with
                 * different e820 types (RAM/reserved/..)
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}
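
/*
 * Summary of the return values: 1 if every page in the range is RAM,
 * 0 if none is, and -1 for a mixed range (e.g. one spanning both RAM
 * and a reserved e820 region), which callers treat as an error.
 */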
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                unsigned long type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        printk(KERN_INFO "reserve_ram_pages_type failed "
                                "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
                                start, end, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else if (req_type == _PAGE_CACHE_WC)
                                *new_type = _PAGE_CACHE_UC_MINUS;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = reserve_ram_pages_type(start, end, req_type, new_type);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = actual_type;

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, new->start);
        if (likely(entry != NULL)) {
                /* To work correctly with list_for_each_entry_continue */
                entry = list_entry(entry->nd.prev, struct memtype, nd);
        } else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        memtype_rb_insert(&memtype_rbroot, new);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}
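
/*
 * Illustrative usage (the addresses are hypothetical, not from this
 * file): reserving 1MB of MMIO space at 0xd0000000 as write-combining,
 * then releasing it:
 *
 *      unsigned long ret_type;
 *
 *      if (!reserve_memtype(0xd0000000ULL, 0xd0100000ULL,
 *                           _PAGE_CACHE_WC, &ret_type))
 *              free_memtype(0xd0000000ULL, 0xd0100000ULL);
 *
 * ret_type may differ from the request if an overlapping reservation
 * already holds a different, compatible type.
 */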
int free_memtype(u64 start, u64 end)
{
        struct memtype *entry, *saved_entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = free_ram_pages_type(start, end);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, start);
        if (unlikely(entry == NULL))
                goto unlock_ret;

        /*
         * Saved entry points to an entry with start same or less than what
         * we searched for. Now go through the list in both directions to look
         * for the entry that matches with both start and end, with list stored
         * in sorted start address
         */
        saved_entry = entry;
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start > start) {
                        break;
                }
        }

        if (!err)
                goto unlock_ret;

        entry = saved_entry;
        list_for_each_entry_reverse(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start < start) {
                        break;
                }
        }

unlock_ret:
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
        int rettype = _PAGE_CACHE_WB;
        struct memtype *entry;

        if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                spin_lock(&memtype_lock);
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                spin_unlock(&memtype_lock);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        unsigned long *type)
{
        unsigned long req_type = *type;
        unsigned long new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, end - start));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, end - start, new_type) < 0)
                goto out_free;

        *type = new_type;

        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}
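
/*
 * This pairs a PAT reservation with kernel_map_sync_memtype(), so the
 * kernel identity map agrees with the requested attributes before an io
 * mapping is handed out; io_free_memtype() below is its counterpart.
 */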
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        unsigned long flags = _PAGE_CACHE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (base >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO
                        "%s:%d ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        base, (unsigned long long)(base + size));
                return -EINVAL;
        }
        return 0;
}
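
/*
 * The sync matters because a physical page reachable both through the
 * kernel identity map and through a new mapping with different cache
 * attributes would otherwise alias in the cache.
 */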
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() doesn't support RAM pages. Maintain the current
         * behavior with RAM pages by returning success.
         */
        if (is_ram != 0)
                return 0;

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
{
        unsigned long flags;
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        if (!pat_enabled)
                return 0;

        /* for vm_insert_pfn and friends, we set prot based on lookup */
        flags = lookup_memtype(pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         flags);

        return 0;
}
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size)
{
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
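
/*
 * Typical driver usage (illustrative), e.g. when mapping a framebuffer
 * into userspace:
 *
 *      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * On kernels without PAT this transparently degrades to an uncached
 * mapping via pgprot_noncached().
 */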
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
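
/*
 * With debugfs mounted at its usual location, the file created above is
 * readable as /sys/kernel/debug/x86/pat_memtype_list; each line shows
 * one tracked range, e.g. (values hypothetical):
 *
 *      write-combining @ 0xd0000000-0xd0100000
 */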