/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		processed;
	int		flushtlb;
	unsigned long	pfn;
};
static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}
static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
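
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that has filled a descriptor through a cacheable mapping and needs the
 * data visible to a non-coherent observer can flush just that range
 * instead of a full wbinvd. "desc" and its status field are hypothetical:
 *
 *	desc->status = DESC_READY;
 *	clflush_cache_range(desc, sizeof(*desc));
 *
 * The mfence before and after the clflush loop orders the otherwise
 * unordered clflush instructions against surrounding stores.
 */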
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}
static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext, since that is gone later on. On
	 * 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
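
/*
 * Worked example (illustration only, hypothetical "addr"/"rodata_pfn"):
 * a request to make a .rodata page writable picks up _PAGE_RW in the
 * forbidden mask, so the RW bit is filtered out again:
 *
 *	pgprot_t prot = __pgprot(_PAGE_PRESENT | _PAGE_RW);
 *
 *	prot = static_protections(prot, addr, rodata_pfn);
 *	// pgprot_val(prot) == _PAGE_PRESENT, RW has been stripped
 */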
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
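
/*
 * Usage sketch (illustrative, assuming a kernel virtual address "addr"):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_2M)
 *		; // addr is backed by a 2M large mapping
 *
 * Callers must look at the returned level before interpreting the entry,
 * because for large mappings the pointer actually refers to a pmd/pud.
 */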
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->processed)
		cpa->processed = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protection() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->processed; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->processed has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->processed == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}
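
/*
 * Worked example (illustration only): assume a 2M mapping (psize = 2M,
 * pmask = ~(2M - 1)) and a request that starts 0x1000 bytes into it:
 *
 *	nextpage_addr = (address + psize) & pmask;	// start of next 2M page
 *	numpages = (nextpage_addr - address) >> PAGE_SHIFT;	// 511 pages
 *
 * The address is not aligned to the large page, so the "one go" condition
 * above fails and the caller ends up splitting the large page instead.
 */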
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;
static void cpa_fill_pool(struct page **ret)
{
	gfp_t gfp = GFP_KERNEL;
	unsigned long flags;
	struct page *p;

	/*
	 * Avoid recursion (on debug-pagealloc) and also signal
	 * our priority to get to these pagetables:
	 */
	if (current->flags & PF_MEMALLOC)
		return;
	current->flags |= PF_MEMALLOC;

	/*
	 * Allocate atomically from atomic contexts:
	 */
	if (in_atomic() || irqs_disabled() || debug_pagealloc)
		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	while (pool_pages < pool_size || (ret && !*ret)) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		/*
		 * If the call site needs a page right now, provide it:
		 */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		spin_lock_irqsave(&pgd_lock, flags);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	current->flags &= ~PF_MEMALLOC;
}
#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
	if (debug_pagealloc) {
		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
		pool_size = POOL_PAGES_PER_GB * gb;
	} else {
		pool_size = 1;
	}
	pool_low = pool_size;

	cpa_fill_pool(NULL);
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}
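
/*
 * Sizing example (illustration only): with CONFIG_DEBUG_PAGEALLOC and
 * 4 GiB of RAM (totalram = 1048576 pages of 4K):
 *
 *	si.totalram >> SHIFT_MB			= 4096 MiB
 *	(4096 + ROUND_MB_GB) >> SHIFT_MB_GB	= 4 GiB (rounded up)
 *	pool_size = POOL_PAGES_PER_GB * 4	= 64 pages
 *
 * Without DEBUG_PAGEALLOC only a minimal pool is kept around.
 */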
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		base = NULL;
		cpa_fill_pool(&base);
		if (!base)
			return -ENOMEM;
		spin_lock_irqsave(&pgd_lock, flags);
	} else {
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;

		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}
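
/*
 * Note (illustration only): the split replaces one 2M PMD entry with a
 * page-table page of PTRS_PER_PTE 4K entries covering the same physical
 * range, i.e. pbase[i] maps pfn + i for i = 0..511. For a 1G split,
 * pfninc advances by a whole PMD page worth of pfns per entry instead.
 */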
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->processed = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->processed and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err)
		goto repeat;

	return err;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!within(cpa->vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	alias_cpa.vaddr =
		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = cpa->processed = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->processed > numpages);
		numpages -= cpa->processed;
		cpa->vaddr += cpa->processed * PAGE_SIZE;
	}
	return 0;
}
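
/*
 * Accounting example (illustration only): for a request of 1024 pages
 * starting on a 2M boundary, the first iteration can preserve one whole
 * large page, so cpa->processed comes back as 512; numpages drops to 512
 * and cpa->vaddr advances by 512 * PAGE_SIZE before the next iteration.
 */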
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool(NULL);

	return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);
int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
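
/*
 * Typical usage sketch (illustrative, hypothetical page-aligned buffer
 * "buf" of "nrpages" pages): switch the buffer to uncached before handing
 * it to a device that bypasses the caches, and restore write-back later:
 *
 *	ret = set_memory_uc((unsigned long)buf, nrpages);
 *	...
 *	ret = set_memory_wb((unsigned long)buf, nrpages);
 *
 * Both calls go through change_page_attr_set_clr() and also fix up the
 * aliases of the range via cpa_process_alias().
 */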
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);
int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);
int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);
int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);
int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}
int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}
*page
, int numpages
)
873 struct cpa_data cpa
= { .vaddr
= (unsigned long) page_address(page
),
874 .numpages
= numpages
,
875 .mask_set
= __pgprot(0),
876 .mask_clr
= __pgprot(_PAGE_PRESENT
| _PAGE_RW
)};
878 return __change_page_attr_set_clr(&cpa
, 1);
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush above:
	 */
	cpa_fill_pool(NULL);
}
#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif