/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

/*
 * Walk the kernel page tables and return the pte that maps @address,
 * or NULL if it is not mapped. For a large page the pmd entry itself
 * is returned, so callers must check _PAGE_PSE.
 */
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}

static struct page *split_large_page(unsigned long address, pgprot_t prot,
					pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base;
	pte_t *pbase;

	/* Drop the lock around the allocation: alloc_pages() may sleep. */
	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(page_to_pfn(base));
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}

static void flush_kernel_map(void *arg)
{
	unsigned long adr = (unsigned long)arg;

	if (adr && cpu_has_clflush) {
		int i;

		/* Flush only the cache lines of this one page. */
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (adr + i));
	} else if (boot_cpu_data.x86_model >= 4)
		wbinvd();

	/* Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();
}

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);		/* change init_mm */

	/* Propagate the update to the pmd of every pgd on the pgd list. */
	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgprot_t ref_prot;
	pte_t *linear;

	ref_prot =
	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

	linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    ref_prot));
}

static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			paravirt_release_pt(page_to_pfn(kpte_page));
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}

static inline void flush_map(void *adr)
{
	on_each_cpu(flush_kernel_map, adr, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 * (A minimal usage sketch follows the function body below.)
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);
	return err;
}
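
/*
 * Minimal usage sketch for the interface documented above. Illustrative
 * only: "buf" stands for some page-aligned lowmem buffer and is not
 * defined in this file.
 *
 *	struct page *pg = virt_to_page(buf);
 *
 *	change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 *	... access buf through the now-uncached linear mapping ...
 *
 *	change_page_attr(pg, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */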

void global_flush_tlb(void)
{
	struct list_head l;
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_replace_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	if (!cpu_has_clflush)
		flush_map(NULL);
	list_for_each_entry_safe(pg, next, &l, lru) {
		if (cpu_has_clflush)
			flush_map(page_address(pg));
		__free_page(pg);
	}
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);