/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);
#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC          = 0,    /* uncached */
        PAT_WC          = 1,    /* Write combining */
        PAT_WT          = 4,    /* Write Through */
        PAT_WP          = 5,    /* Write Protected */
        PAT_WB          = 6,    /* Write Back (default) */
        PAT_UC_MINUS    = 7,    /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x) * 8))
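
/*
 * Each of the eight PAT MSR slots is one byte wide, so PAT(x, y) places
 * the encoding PAT_y into bits (x*8 + 7):(x*8). For example, PAT(1, WC)
 * evaluates to (u64)1 << 8, i.e. slot 1 set to Write Combining.
 */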

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but the
                 * boot CPU switched to PAT. We have no way to undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Save the boot CPU's original PAT state once, before overwriting */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}
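
/*
 * With the table above, pat_init() programs IA32_PAT to 0x0007010600070106:
 * slots 0-3 are WB/WC/UC-/UC and slots 4-7 repeat them, so the effective
 * type does not depend on the PAT bit in the PTE.
 */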

static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so we keep track of them here.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting of overlapping
 * areas). All the aliases have the same cache attributes, of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list, because the number of mappings
 * is expected to be relatively small. If this should become a problem,
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The same type does not have the same numeric value in pat and mtrr.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM, vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for an MTRR hint to get the effective type in case where
         * the PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}
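
/*
 * Example: a _PAGE_CACHE_WB request over a range that an MTRR marks
 * UNCACHABLE degrades to _PAGE_CACHE_UC; requests other than WB are
 * passed through unchanged.
 */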

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}
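
/*
 * Note the asymmetry above: when the caller passes a non-NULL *type, a
 * type mismatch with the first entry is resolved by silently adopting the
 * existing entry's type; only a mismatch across further overlapping
 * entries (or a mismatch with type == NULL) is reported as -EBUSY.
 */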

static struct memtype *cached_entry;
static u64 cached_start;
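
/*
 * Returns 1 if the whole range is RAM (above the low ISA region), 0 if
 * none of it is, and -1 as soon as the range is seen to mix RAM and
 * non-RAM pages.
 */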
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, the physical address range in the
                 * legacy ISA region is tracked as non-RAM. This allows
                 * users of /dev/mem to map portions of the legacy ISA
                 * region, even when some of those portions are listed
                 * (or not even listed) with different e820 types
                 * (RAM/reserved/..).
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * For RAM pages, mark the pages as non-WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is OK, because only one driver will own the page and do
 * set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non-WB. In the future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))
                        goto out;

                SetPageNonWB(page);
        }
        return 0;

out:
        /* Undo the partial marking before returning the error */
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);
        }

        return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))
                        goto out;

                ClearPageNonWB(page);
        }
        return 0;

out:
        /* Restore the NonWB bits cleared so far before returning the error */
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                SetPageNonWB(page);
        }

        return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have the special case value '-1' when the requester wants
 * to inherit the memory type from mtrr (if WB), existing PAT, defaulting
 * to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in new_type in case of no error. In case of any error
 * it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else {
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);
        }

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type,
                                              new_type);
        else if (is_range_ram < 0)
                return -EINVAL;

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                        struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                        struct memtype, nd);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}
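
/*
 * Typical (hypothetical) caller reserving an MMIO range as UC- and
 * accepting whatever compatible type the tracker actually grants:
 *
 *      unsigned long actual;
 *      int err;
 *
 *      err = reserve_memtype(addr, addr + size, _PAGE_CACHE_UC_MINUS,
 *                            &actual);
 *      if (err)
 *              return err;
 *      // build the PTE protection bits from 'actual', not the request
 */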

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return free_ram_pages_type(start, end);
        else if (is_range_ram < 0)
                return -EINVAL;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}
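
/*
 * Note that the lookup above matches only on an exact (start, end) pair:
 * callers must free exactly the range they reserved, or the free is
 * rejected as invalid.
 */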

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, (unsigned long long)(addr + size),
                        cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}
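
/*
 * map_devmem()/unmap_devmem() are best effort: map_devmem() ignores the
 * reserve_memtype() return value and only warns when the granted type
 * differs from the one the /dev/mem mapping asked for.
 */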

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * it also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
{
        int is_ram = 0;
        int id_sz, ret;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() doesn't support RAM pages. Maintain the current
         * behavior with RAM pages by returning success.
         */
        if (is_ram != 0)
                return 0;

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        /* Need to keep identity mapping in sync */
        if (paddr >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < paddr + size) ?
                                __pa(high_memory) - paddr :
                                size;

        if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
                free_memtype(paddr, paddr + size);
                printk(KERN_ERR
                        "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        (unsigned long long)paddr,
                        (unsigned long long)(paddr + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 * Otherwise, we reserve the entire vma range by going through the PTEs
 * page by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;
        pgprot_t pgprot;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        /* reserve entire vma page by page, using pfn and prot from pte */
        for (i = 0; i < vma_size; i += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                        continue;

                pgprot = __pgprot(prot);
                retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                        continue;

                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, we reserve the entire vma range
 * with a single reserve_pfn_range() call.
 * Otherwise, we look at the pfn and size and reserve only the specified
 * range page by page.
 *
 * Note that this function can be called with the caller trying to map only
 * a subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t base_paddr;
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        /* reserve page by page using pfn and size */
        base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
        for (i = 0; i < size; i += PAGE_SIZE) {
                paddr = base_paddr + i;
                retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                paddr = base_paddr + j;
                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}
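
/*
 * Both page-by-page reserve loops above undo partial work on failure: the
 * cleanup pass frees every page reserved before the failing one (j < i)
 * and returns the original error.
 */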

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size)
{
        unsigned long i;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }

        if (size != 0 && size != vma_size) {
                /* free page by page, using pfn and size */
                paddr = (resource_size_t)pfn << PAGE_SHIFT;
                for (i = 0; i < size; i += PAGE_SIZE)
                        free_pfn_range(paddr + i, PAGE_SIZE);
        } else {
                /* free entire vma, page by page, using the pfn from pte */
                for (i = 0; i < vma_size; i += PAGE_SIZE) {
                        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                                continue;

                        free_pfn_range(paddr, PAGE_SIZE);
                }
        }
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
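
/*
 * Without PAT there is no way to encode WC in the PTE, so callers
 * transparently get an uncached mapping instead; drivers can therefore
 * use pgprot_writecombine() unconditionally.
 */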

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);
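
/*
 * With debugfs mounted, the tracked list can be inspected from user
 * space, e.g.:
 *      # cat /sys/kernel/debug/x86/pat_memtype_list
 */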

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */