/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd || !pgd_present(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		return NULL;
	/* A large page mapping has no pte level - return the pmd entry. */
	if (pmd_val(*pmd) & _PAGE_PSE)
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
/*
 * Allocate a page table page and populate it so that "address" gets
 * "prot" while every other 4K page in the 2/4MB area keeps "ref_prot".
 */
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
static void flush_kernel_map(void *address)
{
	if (0 && address && cpu_has_clflush) {
		/* is this worth it? */
		int i;
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd":::"memory");
	if (address)
		__flush_tlb_one(address);
	else
		__flush_tlb_all();
}
static inline void flush_map(unsigned long address)
{
	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
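
/*
 * Page-table pages freed by a revert, and the addresses they mapped, are
 * queued here so that the expensive flush and the frees can be batched
 * into a single global_flush_tlb() call.
 */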
struct deferred_page {
	struct deferred_page *next;
	struct page *fpage;
	unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
static inline void save_page(unsigned long address, struct page *fpage)
{
	struct deferred_page *df;

	df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
	if (!df) {
		/* Cannot defer - flush and free immediately. */
		flush_map(address);
		__free_page(fpage);
	} else {
		df->next = df_list;
		df->fpage = fpage;
		df->address = address;
		df_list = df;
	}
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	pmd = pmd_offset(pgd, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pgprot_val(ref_prot) |= _PAGE_PSE;
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
	set_pte((pte_t *)pmd, large_pte);
}
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	unsigned kpte_flags;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if ((kpte_flags & _PAGE_PSE) == 0) {
			pte_t old = *kpte;
			pte_t standard = mk_pte(page, ref_prot);

			set_pte(kpte, mk_pte(page, prot));
			/* Count one more non-standard pte in this
			   page table page. */
			if (pte_same(old, standard))
				get_page(kpte_page);
		} else {
			struct page *split = split_large_page(address, prot,
							      ref_prot);
			if (!split)
				return -ENOMEM;
			get_page(kpte_page);
			set_pte(kpte, mk_pte(split, ref_prot));
		}
	} else if ((kpte_flags & _PAGE_PSE) == 0) {
		set_pte(kpte, mk_pte(page, ref_prot));
		__put_page(kpte_page);
	}

	/* Only the allocation reference left - revert to a large page. */
	if (page_count(kpte_page) == 1) {
		save_page(address, kpte_page);
		revert_page(address, ref_prot);
	}
	return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
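/*
 * Illustrative usage (a sketch, not part of this file): a caller that
 * wants one of its pages mapped uncached in the linear map might do,
 * with "pg" standing for a page it already owns:
 *
 *	change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * and later restore the default with change_page_attr(pg, 1, PAGE_KERNEL)
 * followed by another global_flush_tlb().
 */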
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; !err && i++, page++) {
		unsigned long address = (unsigned long)page_address(page);
		err = __change_page_attr(address, page, prot, PAGE_KERNEL);
		if (err)
			break;
		/* Handle kernel mapping too which aliases part of the
		 * lowmem */
		/* Disabled right now. Fixme */
		if (0 && page_to_phys(page) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			addr2 = __START_KERNEL_map + page_to_phys(page);
			err = __change_page_attr(addr2, page, prot,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}
void global_flush_tlb(void)
{
	struct deferred_page *df, *next_df;

	down_read(&init_mm.mmap_sem);
	df = xchg(&df_list, NULL);
	up_read(&init_mm.mmap_sem);
	/* A single deferred page can be flushed precisely; more than one
	   forces a global flush. */
	flush_map((df && !df->next) ? df->address : 0);
	for (; df; df = next_df) {
		next_df = df->next;
		if (df->fpage)
			__free_page(df->fpage);
		kfree(df);
	}
}
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);