arch/x86_64/mm/pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * Walk the kernel page tables and return the pte mapping 'address',
 * or NULL if it is not mapped.  For a 2MB large page the pmd entry
 * itself is returned, cast to a pte.
 */
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
/*
 * Break the 2MB page containing 'address' into 512 4k ptes: the pte
 * covering 'address' gets 'prot', all the others get 'ref_prot'.
 * Returns the newly allocated pte page, or NULL on allocation failure.
 */
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
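/*
 * Worked example for split_large_page() (illustrative numbers only,
 * not from the original source): with 2MB pages LARGE_PAGE_MASK
 * clears the low 21 bits, so for a physical address of 0x1234567000
 * the loop starts at addr = 0x1234400000 and fills all 512 ptes
 * covering 0x1234400000 - 0x12345fffff; only the pte for
 * 0x1234567000 receives 'prot', the rest keep 'ref_prot'.
 */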
/*
 * Per-CPU flush routine, run on every CPU via on_each_cpu().  The
 * clflush-based variant is deliberately disabled in the original
 * (note the 'if (0 && ...)'); a full wbinvd is used instead, followed
 * by a TLB flush of either the single changed address or everything.
 */
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}
static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
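/*
 * Note (based on the 2.6-era on_each_cpu() signature): the two
 * trailing 1s are the 'retry' and 'wait' arguments, so flush_map()
 * does not return until flush_kernel_map() has completed on every
 * online CPU.
 */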
struct deferred_page {
        struct deferred_page *next;
        struct page *fpage;
        unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */

/*
 * Queue a no-longer-needed pte page for freeing at the next
 * global_flush_tlb().  If the bookkeeping allocation fails, flush
 * and free immediately instead.
 */
static inline void save_page(unsigned long address, struct page *fpage)
{
        struct deferred_page *df;

        df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
        if (!df) {
                flush_map(address);
                __free_page(fpage);
        } else {
                df->next = df_list;
                df->fpage = fpage;
                df->address = address;
                df_list = df;
        }
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
                                             & ~(1 << _PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                __put_page(kpte_page);
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        switch (page_count(kpte_page)) {
        case 1:
                save_page(address, kpte_page);
                revert_page(address, ref_prot);
                break;
        case 0:
                BUG(); /* memleak and failed 2M page regeneration */
        }
        return 0;
}
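/*
 * Note on the reference scheme above (an interpretation, not a
 * comment from the original source): page_count() of a split pte page
 * counts the ptes that still differ from ref_prot, plus the original
 * allocation reference.  Each change away from ref_prot takes a
 * reference with get_page(), each change back drops one with
 * __put_page().  Once the count falls to 1 only the allocation
 * reference remains, so the 2MB area is collapsed back into a single
 * large pte by revert_page() and the pte page is queued for freeing
 * via save_page().
 */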
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle kernel mapping too which aliases part of
                 * the lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
/*
 * Flush every CPU, then free the pte pages queued by save_page().
 * If exactly one deferred entry exists, only its address is flushed;
 * otherwise everything is flushed.
 */
void global_flush_tlb(void)
{
        struct deferred_page *df, *next_df;

        down_read(&init_mm.mmap_sem);
        df = xchg(&df_list, NULL);
        up_read(&init_mm.mmap_sem);
        flush_map((df && !df->next) ? df->address : 0);
        for (; df; df = next_df) {
                next_df = df->next;
                if (df->fpage)
                        __free_page(df->fpage);
                kfree(df);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
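/*
 * Usage sketch (not part of the original file; the 'example_*' names
 * are hypothetical): per the comment above change_page_attr_addr(),
 * every attribute change must be followed by global_flush_tlb()
 * before the new mapping may be relied upon.  Guarded with #if 0 so
 * it is never built.
 */
#if 0
static int example_make_uncached(struct page *page, int npages)
{
        int err = change_page_attr(page, npages, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;
        global_flush_tlb();     /* mandatory after change_page_attr() */
        return 0;
}

static void example_restore_cached(struct page *page, int npages)
{
        /* Reverting to PAGE_KERNEL drops the extra reference taken in
         * __change_page_attr() and may let the 2MB mapping be rebuilt. */
        change_page_attr(page, npages, PAGE_KERNEL);
        global_flush_tlb();
}
#endif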