x86: add common mm/pgtable.c
[linux-2.6/openmoko-kernel/knife-kernel.git] / arch/x86/mm/pgtable.c
blob d526b46ae1883aa6f13286f6c70b4e964b4da06a
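
/*
 * Page-table allocation helpers shared by 32-bit and 64-bit x86:
 * pte page allocation plus the pgd_alloc()/pgd_free() implementations
 * for each configuration.
 */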
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
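
/*
 * On 64-bit, every pgd is a page of its own and is linked onto pgd_list
 * under pgd_lock, so that updates to the kernel part of the page tables
 * can later be propagated to all pgds in the system.
 */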
#ifdef CONFIG_X86_64
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_add(&page->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
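
/* pgds are allocated as whole pages above, so the pointer must be page-aligned. */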
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
#else
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
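
/*
 * Number of pgd entries whose pmds are private to this pgd: when the
 * kernel pmd is shared, only the user part needs its own pmd pages,
 * otherwise every entry does.
 */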
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
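
/* Undo pgd_ctor(): take the pgd off pgd_list again (only needed when the
   kernel pmd is not shared). */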
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
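
/*
 * Allocate a zeroed pgd page, make it visible through mm->pgd, run the
 * constructor and, on PAE, pre-populate the pmds; if that fails the pgd
 * is torn down again and NULL is returned.
 */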
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif