/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
	unsigned long	pfn;
	unsigned	force_split : 1;
};
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	unsigned long flags;

	/* Protect against CPA */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}
int arch_report_meminfo(char *page)
{
	int n = sprintf(page, "DirectMap4k: %8lu\n"
			"DirectMap2M: %8lu\n",
			direct_pages_count[PG_LEVEL_4K],
			direct_pages_count[PG_LEVEL_2M]);

#ifdef CONFIG_X86_64
	n += sprintf(page + n, "DirectMap1G: %8lu\n",
			direct_pages_count[PG_LEVEL_1G]);
#endif
	return n;
}
#else
static inline void split_page_count(int level) { }
#endif /* CONFIG_PROC_FS */
static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
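/*
 * Illustrative use (hypothetical caller): flush the cache lines backing a
 * buffer of @len bytes before handing it to an agent that bypasses the CPU
 * caches, relying on the mb() fencing above for ordering:
 *
 *	clflush_cache_range(buf, len);
 */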
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}
static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize this further and do individual per-page
	 * TLB invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext, since that is gone later on. On
	 * 64-bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
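/*
 * Example of the effect (derived from the checks above): a set_memory_rw()
 * that covers .rodata has _PAGE_RW filtered out again, and a set_memory_nx()
 * that covers the kernel text range has _PAGE_NX filtered out, so those
 * regions keep their required static protections regardless of the caller.
 */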
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a non-existent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
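/*
 * Typical lookup pattern (as used by cpa_flush_range() above, shown here
 * for illustration only):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		the address is mapped; level is PG_LEVEL_4K, _2M or _1G
 */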
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	set_pte_atomic(kpte, pte);

	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
}
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}
	/*
	 * Calculate the number of pages that fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;
	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);

	new_prot = static_protections(new_prot, address, pfn);
	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}
	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}
	/*
	 * We need to change the attributes. Check whether we can
	 * change the large page in one go. We request a split when
	 * the address is not aligned or the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}
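/*
 * Worked example of the preservation check above (illustrative numbers):
 * for a 2M mapping psize is PMD_PAGE_SIZE (0x200000), so a request whose
 * start address is 2M aligned (address == nextpage_addr - psize) and whose
 * cpa->numpages covers all 512 4k pages of the large page is rewritten in
 * place; an unaligned start or a shorter range leads to a split instead.
 */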
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;
static void cpa_fill_pool(struct page **ret)
{
	gfp_t gfp = GFP_KERNEL;
	unsigned long flags;
	struct page *p;

	/*
	 * Avoid recursion (on debug-pagealloc) and also signal
	 * our priority to get to these pagetables:
	 */
	if (current->flags & PF_MEMALLOC)
		return;
	current->flags |= PF_MEMALLOC;

	/*
	 * Allocate atomically from atomic contexts:
	 */
	if (in_atomic() || irqs_disabled() || debug_pagealloc)
		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	while (pool_pages < pool_size || (ret && !*ret)) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		/*
		 * If the call site needs a page right now, provide it:
		 */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		spin_lock_irqsave(&pgd_lock, flags);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	current->flags &= ~PF_MEMALLOC;
}
#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16
void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
	if (debug_pagealloc) {
		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
		pool_size = POOL_PAGES_PER_GB * gb;
	} else {
		pool_size = 1;
	}
	pool_low = pool_size;

	cpa_fill_pool(NULL);
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}
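/*
 * Sizing example for the arithmetic above (illustrative, assuming 4k pages
 * and 4 GiB of RAM): totalram is ~1048576 pages, ">> SHIFT_MB" gives 4096
 * MiB, adding ROUND_MB_GB and shifting by SHIFT_MB_GB yields gb = 4, so
 * the debug-pagealloc pool is sized to 4 * POOL_PAGES_PER_GB = 64 pages.
 */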
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		base = NULL;
		cpa_fill_pool(&base);
		if (!base)
			return -ENOMEM;
		spin_lock_irqsave(&pgd_lock, flags);
	} else {
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;

		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	if (address >= (unsigned long)__va(0) &&
	    address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);

	if (address >= (unsigned long)__va(1UL<<32) &&
	    address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		return -EINVAL;
	}

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err)
		goto repeat;

	return err;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	if (cpa->pfn > max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!(within(cpa->vaddr, PAGE_OFFSET,
		     PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
	      || within(cpa->vaddr, PAGE_OFFSET + (1UL<<32),
			PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return ret;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return ret;

	alias_cpa = *cpa;
	alias_cpa.vaddr =
		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);

	return ret;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}

	return 0;
}
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check whether we are requested to change an unsupported feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;
	cpa.force_split = force_split;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool(NULL);

	return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
}
int _set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_CACHE_UC_MINUS));
}

int set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
			    _PAGE_CACHE_UC_MINUS, NULL))
		return -EINVAL;

	return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);
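/*
 * Illustrative driver-style usage (hypothetical variables): switch a range
 * of the direct mapping to uncached while a device owns it, then restore
 * write-back (which also releases the reserved memtype) when done:
 *
 *	set_memory_uc((unsigned long)vaddr, nrpages);
 *	...
 *	set_memory_wb((unsigned long)vaddr, nrpages);
 */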
int _set_memory_wc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_CACHE_WC));
}
int set_memory_wc(unsigned long addr, int numpages)
{
	if (!pat_wc_enabled)
		return set_memory_uc(addr, numpages);

	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
			    _PAGE_CACHE_WC, NULL))
		return -EINVAL;

	return _set_memory_wc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wc);
int _set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK));
}

int set_memory_wb(unsigned long addr, int numpages)
{
	free_memtype(addr, addr + numpages * PAGE_SIZE);

	return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0),
					__pgprot(0), 1);
}
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
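/*
 * The set_pages_*() helpers above are thin wrappers that translate a
 * struct page to its direct-mapping address. Illustrative (hypothetical)
 * usage to write-protect a freshly allocated page and undo it later:
 *
 *	struct page *pg = alloc_pages(GFP_KERNEL, 0);
 *
 *	set_pages_ro(pg, 1);
 *	...
 *	set_pages_rw(pg, 1);
 */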
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa, 1);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock; flush only the current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
	cpa_fill_pool(NULL);
}
#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
	seq_puts(m, "DEBUG_PAGEALLOC\n");
	seq_printf(m, "pool_size     : %lu\n", pool_size);
	seq_printf(m, "pool_pages    : %lu\n", pool_pages);
	seq_printf(m, "pool_low      : %lu\n", pool_low);
	seq_printf(m, "pool_used     : %lu\n", pool_used);
	seq_printf(m, "pool_failed   : %lu\n", pool_failed);

	return 0;
}
static int dpa_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
	.open		= dpa_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init debug_pagealloc_proc_init(void)
{
	struct dentry *de;

	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
				 &dpa_fops);
	if (!de)
		return -ENOMEM;

	return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif