/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        *level = 3;                     /* mapped by a large (PSE) pmd */
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 4;                     /* mapped by a regular 4k pte */

        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;

        return pte;
}
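/*
 * Usage sketch (illustrative only, not part of the original file): check
 * whether a kernel virtual address "addr" (hypothetical) is still covered
 * by a large 2MB mapping rather than by an individual 4k pte:
 *
 *      int level;
 *      pte_t *kpte = lookup_address(addr, &level);
 *      int is_large = kpte && level == 3;
 *
 * level == 3 corresponds to a PSE pmd entry and level == 4 to a regular
 * pte, matching the assignments in lookup_address() above.
 */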
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
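/*
 * Worked example (added illustration, not in the original source): take a
 * hypothetical kernel address whose physical address is 0x234000, i.e. offset
 * 0x34000 into the 2MB-aligned region at 0x200000.  split_large_page() fills
 * a fresh page table with 512 4k entries covering 0x200000-0x3fffff;
 * pbase[0x34] receives "prot" while the other 511 entries receive "ref_prot",
 * so only the single requested page ends up with the special attributes.
 */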
void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}
static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /*
         * When clflush is available always use it because it is
         * much cheaper than WBINVD.
         */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush) {
                asm volatile("wbinvd" ::: "memory");
        } else {
                list_for_each_entry(pg, l, lru) {
                        void *addr = page_address(pg);

                        clflush_cache_range(addr, PAGE_SIZE);
                }
        }
        __flush_tlb_all();
}
static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}
static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
        unsigned long pfn;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}
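/*
 * Note (added summary, not in the original source): page_private() of a split
 * page table page counts how many of its 512 entries still carry non-default
 * attributes.  __change_page_attr() below increments the count when an entry
 * is given a special protection and decrements it when the entry is restored
 * to ref_prot; once it drops back to zero, revert_page() above re-installs
 * the large mapping and global_flush_tlb() later frees the page table page
 * that save_page() queued on deferred_pages.
 */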
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;
        int level;

        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        pgprot_val(ref_prot2) &= ~_PAGE_NX;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

        if (address >= __START_KERNEL_map &&
            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot,
                                                 PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle kernel mapping too which aliases part of
                 * the lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);

        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
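/*
 * Example usage (sketch only, not part of the original file): a driver that
 * needs an uncached view of "my_page" (a hypothetical struct page it owns)
 * must also switch the kernel linear-map alias and then flush, as required
 * by the comment above change_page_attr_addr():
 *
 *      change_page_attr(my_page, 1, PAGE_KERNEL_NOCACHE);
 *      global_flush_tlb();
 *
 * and restore the default write-back attributes when done:
 *
 *      change_page_attr(my_page, 1, PAGE_KERNEL);
 *      global_flush_tlb();
 */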
void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        /*
         * Write-protect the semaphore, to exclude two contexts
         * doing a list_replace_init() call in parallel and to
         * exclude new additions to the deferred_pages list:
         */
        down_write(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_write(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);