/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
void clflush_cache_range(void *addr, int size)
{
	int i;

	/* Step by the CPU's cache line size so each clflush hits one line: */
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}
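/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * wrote data through a cached mapping and needs it visible through an alias
 * with a different caching policy could flush the affected lines. The
 * buffer name and length below are hypothetical.
 *
 *	memcpy(shared_buf, data, len);
 *	clflush_cache_range(shared_buf, len);
 */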
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/*
 * We allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000
static inline pgprot_t check_exec(pgprot_t prot, unsigned long address)
{
	if (__pa(address) >= BIOS_BEGIN && __pa(address) < BIOS_END)
		pgprot_val(prot) &= ~_PAGE_NX;
	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
		address < (unsigned long)&_etext &&
		(pgprot_val(prot) & _PAGE_NX));

	return prot;
}
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
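/*
 * Example (a sketch mirroring __change_page_attr() below): callers use the
 * level out-parameter to tell a 4k PTE from a still-intact large page and
 * decide whether a split is needed:
 *
 *	int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *
 *	if (kpte && level != PG_LEVEL_4K)
 *		err = split_large_page(kpte, address);
 */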
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		/* Propagate the update into every pgd on the pgd list: */
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	/* Populate the new page table, preserving the large page's prot: */
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
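/*
 * Worked example of the split above (illustrative numbers): on x86-64 a
 * 2MB mapping covers PTRS_PER_PTE == 512 PTEs. For a large page at
 * physical address 0x200000, addr = address & LARGE_PAGE_MASK gives:
 *
 *	pbase[0]   -> pfn 0x200
 *	pbase[1]   -> pfn 0x201
 *	...
 *	pbase[511] -> pfn 0x3ff
 *
 * Every 4k PTE inherits ref_prot, so the split itself changes no
 * attributes; only the caller's later set_pte_atomic() does.
 */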
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(PageHighMem(page));

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = check_exec(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
	} else {
		/* Split the large page first, then retry at 4k granularity: */
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn_to_page(pfn), prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle kernel mapping too which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
		}
#endif
	}

	return err;
}
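/*
 * Usage sketch (illustrative, hypothetical range): for an MMIO hole with no
 * mem_map entry, pass the linear-mapping address directly instead of a
 * struct page, then flush:
 *
 *	err = change_page_attr_addr(addr, numpages, PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 */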
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
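/*
 * Typical call sequence (a sketch, assuming a hypothetical buffer that is
 * also mapped uncached elsewhere): change the linear-map attributes to
 * match, then make the change active as the comment above requires:
 *
 *	change_page_attr(virt_to_page(buf), nr_pages, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 */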
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif