/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	mb();
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
	mb();
}
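/*
 * Illustrative sketch, not part of the original file: a caller that has
 * written a buffer through a cached mapping and needs the lines pushed out
 * before an uncached alias reads them could use the helper like this
 * (buffer and length names are hypothetical):
 *
 *	memcpy(shadow_buf, data, len);
 *	clflush_cache_range(shadow_buf, len);
 */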
static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}
static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}
struct clflush_data {
	unsigned long addr;
	int numpages;
};

static void __cpa_flush_range(void *arg)
{
	struct clflush_data *cld = arg;

	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();

	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
}
static void cpa_flush_range(unsigned long addr, int numpages)
{
	struct clflush_data cld;

	BUG_ON(irqs_disabled());

	cld.addr = addr;
	cld.numpages = numpages;

	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
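/*
 * Illustrative sketch, not part of the original file: static_protections()
 * only ever clears bits from the requested protection. Asking for a writable
 * mapping of a .rodata address with CONFIG_DEBUG_RODATA enabled comes back
 * with _PAGE_RW stripped:
 *
 *	pgprot_t req = PAGE_KERNEL;
 *	pgprot_t eff = static_protections(req, (unsigned long)__start_rodata);
 *	// pgprot_val(eff) no longer contains _PAGE_RW
 */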
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
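/*
 * Illustrative sketch, not part of the original file: callers use the level
 * to decide whether the address is still covered by a large mapping, e.g.:
 *
 *	int level;
 *	pte_t *kpte = lookup_address(addr, &level);
 *
 *	if (kpte && level != PG_LEVEL_4K)
 *		// addr sits inside a large page; changing a single 4k page
 *		// first requires split_large_page()
 */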
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);

	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
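/*
 * Illustrative sketch, not part of the original file: because of the Intel
 * rule quoted above, the pmd entry that replaces the large page is installed
 * executable (pte_mkexec) and the NX decision is left to the 4k leaf PTEs:
 *
 *	pmd (split entry): NX clear
 *	pte[i]:            NX set or clear per 4k page, as requested
 */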
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(pfn > max_low_pfn);

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages ! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

	/*
	 * Handle kernel mapping too which aliases part of
	 * the lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}

	return err;
}
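/*
 * Illustrative sketch, not part of the original file: physical memory inside
 * the kernel text range is mapped twice, so one logical request can touch
 * two virtual addresses:
 *
 *	direct mapping:  __va(pa)                  <- requested protection
 *	kernel mapping:  __START_KERNEL_map + pa   <- same, kept executable
 */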
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {
		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;

		addr += PAGE_SIZE;
	}

	return 0;
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}
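/*
 * Illustrative decision table, not part of the original file:
 *
 *	ret == 0 && cpu_has_clflush  ->  cpa_flush_range()  (clflush per line)
 *	otherwise                    ->  cpa_flush_all()    (wbinvd fallback)
 */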
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}
static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);
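/*
 * Illustrative sketch, not part of the original file: a driver wanting a
 * temporarily uncached view of a buffer in the direct mapping would pair
 * these calls ("buf" and "npages" are hypothetical):
 *
 *	set_memory_uc((unsigned long)buf, npages);
 *	... device accesses the buffer uncached ...
 *	set_memory_wb((unsigned long)buf, npages);
 */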
int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu:
	 */
	__flush_tlb_all();
}
#endif
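/*
 * Illustrative sketch, not part of the original file: with
 * CONFIG_DEBUG_PAGEALLOC the page allocator calls this hook so that freed
 * pages become not-present and stray accesses fault immediately:
 *
 *	kernel_map_pages(page, 1 << order, 0);	// on free: unmap
 *	kernel_map_pages(page, 1 << order, 1);	// on alloc: map again
 */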
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif