/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
}
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
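
/*
 * Illustrative note (added for clarity, not part of the original file):
 * PAT(x, y) places the one-byte encoding of memory type PAT_y into byte x
 * of the 64-bit IA32_PAT MSR image.  For example, PAT(2, UC_MINUS) expands
 * to ((u64)7 << 16), programming PAT entry 2 (selected by PCD=1, PWT=0,
 * PAT=0 in the PTE) as UC-.  The full value built in pat_init() below
 * therefore works out to 0x0007010600070106.
 */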

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}
	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
142 * The global memtype list keeps track of memory type for specific
143 * physical memory areas. Conflicting memory types in different
144 * mappings can cause CPU cache corruption. To avoid this we keep track.
146 * The list is sorted based on starting address and can contain multiple
147 * entries for each address (this allows reference counting for overlapping
148 * areas). All the aliases have the same cache attributes of course.
149 * Zero attributes are represented as holes.
151 * Currently the data structure is a list because the number of mappings
152 * are expected to be relatively small. If this should be a problem
153 * it could be changed to a rbtree or similar.
155 * memtype_lock protects the whole list.
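
/*
 * Sketch of the per-region bookkeeping (reconstructed here from the fields
 * used below; the exact original definition is assumed, not quoted): each
 * tracked range carries its physical bounds, the requested cache attribute
 * and its linkage on the sorted memtype_list.
 */
struct memtype {
	u64			start;	/* physical start, inclusive */
	u64			end;	/* physical end, exclusive */
	unsigned long		type;	/* one of the _PAGE_CACHE_* values */
	struct list_head	nd;	/* node on memtype_list, under memtype_lock */
};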

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (The type values used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}

/*
 * Check that a new request does not conflict with the type of any existing
 * overlapping entry; on a compatible overlap the new entry inherits the
 * existing type (reported through *type when non-NULL).
 */
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mappings.
 */
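
/*
 * Usage sketch (illustrative, not from this file): the single-owner rule
 * above means a driver changes a RAM page's attribute and restores it
 * before anyone else may do so.  'buf' below is a hypothetical
 * kernel-virtual, page-aligned buffer.
 *
 *	if (set_memory_uc((unsigned long)buf, 1))	// marks the page NonWB
 *		return -EIO;
 *	... device accesses the page uncached ...
 *	set_memory_wb((unsigned long)buf, 1);		// clears NonWB again
 */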
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}

	return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have the special case value '-1' when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;

		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK)
			actual_type = _PAGE_CACHE_WB;
		else
			actual_type = _PAGE_CACHE_UC_MINUS;
	} else {
		actual_type = pat_x_mtrr_type(start, end,
					      req_type & _PAGE_CACHE_MASK);
	}

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type,
					      new_type);
	else if (is_range_ram < 0)
		return -EINVAL;

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	else if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
		       current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
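
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical non-RAM user reserves the attribute before establishing a
 * mapping and releases it on teardown.  'phys' and 'len' are hypothetical.
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(phys, phys + len, _PAGE_CACHE_WC, &new_type))
 *		return -EBUSY;		// conflicting mapping already tracked
 *	// map using 'new_type'; it may have been downgraded (e.g. to UC-)
 *	...
 *	free_memtype(phys, phys + len);	// when the mapping goes away
 */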

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = -1;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (((pfn < max_low_pfn_mapped) ||
	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (!pat_enabled || base >= __pa(high_memory))
		return 0;

	/* Sync at most up to the end of the kernel identity mapping */
	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this function also keeps the identity mapping (if any) in sync with the
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
	 * behavior with RAM pages by returning success.
	 */
	if (is_ram != 0)
		return 0;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 * Otherwise, we reserve the entire vma range by going through the PTEs page
 * by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;
	pgprot_t pgprot;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	/* reserve entire vma page by page, using pfn and prot from pte */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		pgprot = __pgprot(prot);
		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
			continue;

		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range() call.
 * Otherwise, we look at the pfn and size and reserve only the specified
 * range page by page.
 *
 * Note that this function can be called with the caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
		      unsigned long pfn, unsigned long size)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		paddr = base_paddr + j;
		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
		     unsigned long size)
{
	unsigned long i;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}

	if (size != 0 && size != vma_size) {
		/* free page by page, using pfn and size */
		paddr = (resource_size_t)pfn << PAGE_SHIFT;
		for (i = 0; i < size; i += PAGE_SIZE) {
			free_pfn_range(paddr + i, PAGE_SIZE);
		}
	} else {
		/* free entire vma, page by page, using the pfn from pte */
		for (i = 0; i < vma_size; i += PAGE_SIZE) {
			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
				continue;

			free_pfn_range(paddr, PAGE_SIZE);
		}
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
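
/*
 * Usage sketch (illustrative only, not from this file): a driver's mmap()
 * handler typically asks for a write-combining userspace mapping of its
 * framebuffer or doorbell region like this ('my_phys_addr' is hypothetical):
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start,
 *				  my_phys_addr >> PAGE_SHIFT,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * With PAT disabled this degrades to an uncached mapping, as the fallback
 * to pgprot_noncached() above shows.
 */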

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
			    NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */