/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

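/*
 * Walk the kernel page tables and return the pte that maps 'address',
 * or NULL if it is not present.  For a 2MB large page the pmd entry is
 * returned cast to a pte.
 */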
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

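/*
 * Split one 2MB kernel mapping into a full page of 4k ptes.  The pte
 * covering 'address' gets 'prot'; all the others get 'ref_prot'.
 * Returns the page holding the new pte table, or NULL if the allocation
 * failed.
 */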
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

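/*
 * Flush the caches and the TLB for one page, or for everything when no
 * address is given.  Run on every CPU via on_each_cpu() below.  The
 * clflush path is deliberately disabled by the "0 &&", so the cache
 * flush always falls back to wbinvd.
 */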
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

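/*
 * pte pages that became unused are not freed right away; they are queued
 * on df_list and only released in global_flush_tlb(), after every CPU
 * has flushed its TLB.
 */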
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

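/*
 * Queue a no longer needed pte page for freeing after the next global
 * TLB flush.  If the bookkeeping allocation fails, flush and free it
 * immediately instead.
 */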
static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;

        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}

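/*
 * The page count of the pte page tracks how many ptes in the surrounding
 * 2MB region still carry a non-default protection.  Once it drops back to
 * one, the whole region is reverted to a single large page and the pte
 * page is queued for freeing.
 */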
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
                                             & ~(1 << _PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                __put_page(kpte_page);
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        switch (page_count(kpte_page)) {
        case 1:
                save_page(address, kpte_page);
                revert_page(address, ref_prot);
                break;
        case 0:
                BUG(); /* memleak and failed 2M page regeneration */
        }
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist.  This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle the kernel text mapping too, which aliases part
                 * of lowmem. */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}

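/*
 * Flush all CPUs and free the pte pages queued by earlier
 * change_page_attr() calls.  df_list is detached under init_mm.mmap_sem
 * so new insertions cannot race with the walk below; a single queued
 * page is flushed by address, more than one forces a full flush.
 */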
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
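
/*
 * Illustrative sketch only - not part of the original file.  It shows how a
 * caller might pair change_page_attr() with global_flush_tlb() to map one
 * page uncached and later restore it; the example_* helper names are made
 * up for illustration.  Kept under #if 0 so it is never built.
 */
#if 0
static struct page *example_alloc_uncached_page(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;
        /* Switch the kernel linear mapping of this page to uncached. */
        if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) < 0) {
                __free_pages(page, 0);
                return NULL;
        }
        /* The new attribute is only guaranteed after the global flush. */
        global_flush_tlb();
        return page;
}

static void example_free_uncached_page(struct page *page)
{
        /* Restore the default write-back mapping before freeing. */
        change_page_attr(page, 1, PAGE_KERNEL);
        global_flush_tlb();
        __free_pages(page, 0);
}
#endif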