/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
	unsigned long	pfn;
};

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
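/*
 * Usage sketch (illustrative only, not part of the original file): after
 * changing the caching attributes of a single page, its dirty cachelines
 * can be written back with
 *
 *	clflush_cache_range(page_address(page), PAGE_SIZE);
 *
 * The loop above steps by boot_cpu_data.x86_clflush_size bytes, so the
 * whole range is covered regardless of the CPU's cacheline size.
 */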
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}
static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize this further and do individual per-page
	 * TLB invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext since that is gone later on.
	 * On 64-bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
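/*
 * Illustrative example (not from the original source): if a caller asks to
 * make a .rodata page writable, e.g.
 *
 *	set_memory_rw((unsigned long)__start_rodata, 1);
 *
 * the pfn falls inside the rodata range checked above, so _PAGE_RW ends up
 * in "forbidden" and is masked out of the returned pgprot - the page stays
 * read-only.
 */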
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
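/*
 * Usage sketch (illustrative only): a caller passes a level variable and
 * can tell small from large mappings, e.g.
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		...addr is covered by a 2M mapping...
 *
 * cpa_flush_range() above uses it the same way to skip addresses that are
 * not mapped present.
 */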
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
}
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages that fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;
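	/*
	 * Illustrative numbers (example values, not from the original
	 * source): for a 2M page (psize = 0x200000, pmask = ~0x1fffff)
	 * and address = 0x1007000, nextpage_addr = 0x1200000, so
	 * numpages = (0x1200000 - 0x1007000) >> 12 = 505: only the 505
	 * remaining 4k pages of this large page are handled in this
	 * step; the caller continues with the rest of the range.
	 */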
	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has already
	 * been updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check whether we can
	 * change the large page in one go. We request a split when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
	gfp_t gfp = GFP_KERNEL;
	unsigned long flags;
	struct page *p;

	/*
	 * Avoid recursion (on debug-pagealloc) and also signal
	 * our priority to get to these pagetables:
	 */
	if (current->flags & PF_MEMALLOC)
		return;
	current->flags |= PF_MEMALLOC;

	/*
	 * Allocate atomically from atomic contexts:
	 */
	if (in_atomic() || irqs_disabled() || debug_pagealloc)
		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	while (pool_pages < pool_size || (ret && !*ret)) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		/*
		 * If the call site needs a page right now, provide it:
		 */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		spin_lock_irqsave(&pgd_lock, flags);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	current->flags &= ~PF_MEMALLOC;
}
#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
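	/*
	 * Illustrative arithmetic (example values, not from the original
	 * source): on a machine with 2 GiB of RAM and 4k pages,
	 * si.totalram is 524288 pages; >> SHIFT_MB gives 2048 MiB, adding
	 * ROUND_MB_GB (1023) and shifting by SHIFT_MB_GB rounds up to
	 * 2 GiB, so pool_size = POOL_PAGES_PER_GB * 2 = 32 pages.
	 */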
	if (debug_pagealloc) {
		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
		pool_size = POOL_PAGES_PER_GB * gb;
	} else {
		pool_size = 1;
	}
	pool_low = pool_size;

	cpa_fill_pool(NULL);
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		base = NULL;
		cpa_fill_pool(&base);
		if (!base)
			return -ENOMEM;
		spin_lock_irqsave(&pgd_lock, flags);
	} else {
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;

		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	}
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		cpa->flushtlb = 1;
		goto repeat;
	}

	return err;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!within(cpa->vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

	if (ret)
		return ret;

	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	alias_cpa.vaddr =
		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);

	return ret;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check whether we are requested to change an attribute that is
	 * not supported:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool(NULL);

	return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
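/*
 * Typical usage (illustrative sketch only, not part of the original file):
 * a driver that wants to access a page-aligned buffer uncached could do
 *
 *	set_memory_uc((unsigned long)vaddr, nrpages);
 *	...access the buffer with caching disabled...
 *	set_memory_wb((unsigned long)vaddr, nrpages);
 *
 * where vaddr and nrpages are the driver's own mapping address and page
 * count.
 */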
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the TLB flush.
	 */
	cpa_fill_pool(NULL);
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif