[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / arch / x86 / mm / pgtable.c

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
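
/*
 * GFP flags for page table allocations: a sleepable kernel allocation,
 * retried hard on failure (__GFP_REPEAT), returned zeroed (__GFP_ZERO)
 * so fresh tables start with no valid entries, and exempt from
 * kmemcheck tracking (__GFP_NOTRACK).
 */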
#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}
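
/*
 * Allocate a pte page for user mappings.  With CONFIG_HIGHPTE the page
 * may come from highmem, so callers have to map it (kmap_atomic) before
 * touching it; pgtable_page_ctor() sets up the per-page bookkeeping,
 * such as the split pte lock when that is configured.
 */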
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
#else
	pte = alloc_pages(PGALLOC_GFP, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
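
/*
 * Page table pages are freed through the mmu_gather so the TLB can be
 * flushed before the page is actually released; the paravirt back end
 * is told first that the pfn no longer holds a pte page.
 */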
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
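
/*
 * The pmd and pud levels only exist on configurations with more than
 * two paging levels (three with PAE, four on x86-64); on two-level
 * configurations they are folded and there is nothing to free here.
 */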
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
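
/* Callers are expected to hold pgd_lock while manipulating pgd_list. */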
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
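
/*
 * Number of pgd entries that must be populated privately in each pgd:
 * when the kernel pmd is shared, only the user portion (below
 * KERNEL_PGD_BOUNDARY) is per-process; otherwise every entry is.
 */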
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	preempt_disable();
	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
	preempt_enable();
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}
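
/*
 * All pmds are allocated up front so a partial failure can be unwound
 * cleanly: if any allocation fails, every page obtained so far is
 * released and -ENOMEM is returned before the pgd is touched.
 */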
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
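
/*
 * Install the preallocated pmds into the new pgd.  Entries at or above
 * KERNEL_PGD_BOUNDARY cover kernel address space, so their contents
 * are copied from swapper_pg_dir before being hooked in; user entries
 * start out empty.
 */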
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
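
/*
 * pgd_alloc() builds the new pgd in stages and unwinds with gotos on
 * failure: the pmd preallocation and the paravirt hook each release
 * everything acquired before them.  mm->pgd is published before the
 * paravirt hook runs, as back ends may want to inspect it.
 */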
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}
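
/*
 * Set the accessed/dirty bits in a pte from the fault path.  The entry
 * is written and the TLB flushed only when it actually changed and the
 * fault was a write (dirty); the return value tells the caller whether
 * anything was updated.
 */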
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}
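
/*
 * Atomically clear the accessed bit, reporting whether it was set.
 * Page reclaim uses this to age mappings; the atomic bit operation
 * avoids racing with the hardware setting the bit concurrently.
 */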
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
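
/*
 * Same as above, but also flush the TLB entry so that a subsequent
 * access must walk the page tables and re-set the accessed bit.
 */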
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
#endif
}
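
/*
 * fixmaps_set counts how many fixmap slots have been installed; it
 * must still be zero when reserve_top_address() moves the fixmap, and
 * __native_set_fixmap() bumps it each time a slot is wired up.
 */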
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}