/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
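
/*
 * Note (added commentary): cpa_lock serializes all attribute changes,
 * while df_list collects page table pages whose flush and possible
 * free is deferred until the caller runs global_flush_tlb().
 */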
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 2;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 3;

	return pte_offset_kernel(pmd, address);
}
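
/*
 * Usage sketch (added, not from the original file): the level
 * out-parameter reports what kind of entry was found - 2 for a
 * 2/4MB pmd, 3 for a regular 4k pte:
 *
 *	int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *
 *	if (kpte && level == 2)
 *		... kpte really points at a large-page pmd entry ...
 */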
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}
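
/*
 * Note (added commentary): page_private() of the new pte page counts
 * how many of its PTRS_PER_PTE entries carry non-standard protections.
 * __change_page_attr() below increments and decrements it, and once it
 * drops back to zero the 2/4MB mapping can be restored by revert_page().
 */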
static void cache_flush_page(struct page *p)
{
	void *addr = page_address(p);
	int i;

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}
static void flush_kernel_map(void *arg)
{
	struct list_head *lh = (struct list_head *)arg;
	struct page *p;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	/* High level code is not ready for clflush yet */
	if (0 && cpu_has_clflush) {
		list_for_each_entry(p, lh, lru)
			cache_flush_page(p);
	} else {
		if (boot_cpu_data.x86_model >= 4)
			wbinvd();
	}
}
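
/*
 * Note (added commentary): with !SHARED_KERNEL_PMD (PAE), every pgd
 * carries its own copy of the kernel pmds, so a change at pmd level
 * must be propagated to every pgd on pgd_list, not just init_mm's.
 */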
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);	/* change init_mm */
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * No more special protections in this 2/4MB area - revert to a large
 * page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgprot_t ref_prot;
	pte_t *linear;

	ref_prot =
	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

	linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    ref_prot));
}
static inline void save_page(struct page *kpte_page)
{
	if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
		list_add(&kpte_page->lru, &df_list);
}
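
/*
 * Note (added commentary): PG_arch_1 serves as an "already queued on
 * df_list" flag in save_page() above, so each pte page is deferred at
 * most once; global_flush_tlb() clears the bit when it drains the list.
 */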
static int __change_page_attr(struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	unsigned long address;
	pte_t *kpte;
	int level;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if (!pte_huge(*kpte)) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			struct page *split;
			pgprot_t ref_prot;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;

			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a large page.
	 */
	save_page(kpte_page);
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			paravirt_release_pt(page_to_pfn(kpte_page));
			revert_page(kpte_page, address);
		}
	}
	return 0;
}
static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);

	return err;
}
EXPORT_SYMBOL(change_page_attr);
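
/*
 * Example (added sketch, not from the original file): a caller that
 * wants one page mapped uncached in the linear map would pair
 * change_page_attr() with global_flush_tlb(), roughly:
 *
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *
 *	if (pg && change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE) == 0)
 *		global_flush_tlb();
 */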
void global_flush_tlb(void)
{
	struct list_head l;
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_replace_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	flush_map(&l);
	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}
EXPORT_SYMBOL(global_flush_tlb);
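
/*
 * Note (added commentary): with CONFIG_DEBUG_PAGEALLOC the page
 * allocator unmaps freed pages from the linear mapping, so stray
 * accesses to freed memory fault immediately; kernel_map_pages()
 * below toggles that mapping on allocation and free.
 */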
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * we should perform an IPI and flush all tlbs,
	 * but that can deadlock - flush only the current cpu.
	 */
	__flush_tlb_all();
}
#endif