#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

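/*
 * Allocate a page of ptes for a kernel mapping.  Kernel pagetables
 * are never in highmem, so a plain zeroed GFP_KERNEL page suffices.
 */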
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

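/*
 * Allocate a pte page for a user mapping.  With CONFIG_HIGHPTE the
 * page may live in highmem (it is then reached via kmap_atomic);
 * pgtable_page_ctor() sets the page up for use as a pagetable,
 * including the split pte lock.
 */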
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

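/*
 * pte pages are released through the mmu_gather, so the page is only
 * freed once the relevant TLB entries are gone; paravirt_release_pt()
 * tells any hypervisor that this pfn no longer holds a pagetable.
 */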
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pt(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

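/* pmd (and pud) pages only exist on builds with more than two (resp.
   three) pagetable levels; they are released the same way. */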
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

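/*
 * On 64-bit, every pgd is kept on pgd_list, protected by pgd_lock,
 * so updates to the kernel portion of the pagetables can later be
 * propagated to all of them.
 */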
#ifdef CONFIG_X86_64
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_add(&page->lru, &pgd_list);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

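/*
 * The user slots (below pgd_index(__PAGE_OFFSET)) start out clear;
 * the kernel slots are copied from init_level4_pgt.
 */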
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

        if (!pgd)
                return NULL;
        pgd_list_add(pgd);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
        pgd_list_del(pgd);
        free_page((unsigned long)pgd);
}

#else   /* CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared.  If PAE were not to share the pmd a similar
 * tactic would be needed.  This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

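/*
 * Number of pgd entries private to each pagetable: with a shared
 * kernel pmd only the user range is per-pgd, otherwise all of it is.
 */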
#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

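/*
 * Set up a freshly allocated pgd: clear the user part and, when the
 * kernel mappings can be shared, clone the kernel part from
 * swapper_pg_dir; otherwise put the pgd on pgd_list so that later
 * kernel mapping updates are synced into it too.
 */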
static void pgd_ctor(void *p)
{
        pgd_t *pgd = p;
        unsigned long flags;

        /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                        USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);
}

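/*
 * Undo pgd_ctor(): with a shared kernel pmd the pgd was never added
 * to pgd_list, so there is nothing to unlink.
 */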
static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        pud_t *pud;
        unsigned long addr;
        int i;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmd_alloc_one(mm, addr);

                if (!pmd) {
                        pgd_mop_up_pmds(mm, pgd);
                        return 0;
                }

                /* Kernel slots get a copy of the kernel pmds from
                   swapper_pg_dir (only reached when they are unshared). */
                if (i >= USER_PTRS_PER_PGD)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }

        return 1;
}

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        if (mm == current->active_mm)
                write_cr3(read_cr3());
}

#else   /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif  /* CONFIG_X86_PAE */

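/*
 * Take a zeroed page, run the constructor and, on PAE, prepopulate
 * the pmds; any failure tears the pgd down again and returns NULL.
 */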
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        /* so that alloc_pd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);

        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                pgd_dtor(pgd);
                free_page((unsigned long)pgd);
                pgd = NULL;
        }

        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        free_page((unsigned long)pgd);
}
#endif  /* CONFIG_X86_64 */