#include <linux/mm.h>
#include <asm/pgalloc.h>

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
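/*
 * Note on the flags above: __GFP_REPEAT asks the allocator to retry
 * harder before failing, and __GFP_ZERO returns the page pre-zeroed,
 * so every new page-table page starts out with all entries "not
 * present" and needs no explicit memset.
 */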
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
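/*
 * The two alloc_pages() variants above differ only in __GFP_HIGHMEM:
 * with CONFIG_HIGHPTE, pte pages may live in highmem and must be
 * kmapped before use. pgtable_page_ctor() prepares the struct page
 * for page-table duty (on split-ptlock configurations it initializes
 * the per-page pte spinlock).
 */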
#ifdef CONFIG_X86_64
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_add(&page->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
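/*
 * pgd_lock serializes these additions and removals against walkers
 * such as vmalloc_sync_all(), which traverses pgd_list to propagate
 * kernel mapping changes into every pgd in the system. The struct
 * page's otherwise-unused lru field doubles as the list linkage.
 */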
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
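/*
 * Worked example of the split above (using this era's x86_64 layout,
 * where __PAGE_OFFSET is 0xffff810000000000, PGDIR_SHIFT is 39 and
 * PTRS_PER_PGD is 512): pgd_index(__PAGE_OFFSET) = 258, so entries
 * 0..257 (user space) are zeroed, while entries 258..511 are copied
 * from init_level4_pgt and the new mm shares the kernel's upper
 * mappings from birth.
 */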
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
#else	/* !CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
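/*
 * Unlike the 64-bit versions above, these list helpers take no lock:
 * their only callers, pgd_ctor() and pgd_dtor() below, already hold
 * pgd_lock. UNSHARED_PTRS_PER_PGD counts the pgd entries private to
 * each pagetable: when the kernel pmd is shared, only the user
 * portion is per-pgd; otherwise every entry is.
 */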
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
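/*
 * clone_pgd_range() is, roughly, a memcpy of KERNEL_PGD_PTRS pgd
 * entries, so the new pgd references the same kernel pmd/pte pages as
 * swapper_pg_dir rather than private copies. The paravirt hook tells
 * a hypervisor, if one is present, that this pgd aliases the kernel
 * portion of swapper_pg_dir.
 */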
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
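/*
 * The order above matters: the pgd entry is cleared with
 * native_make_pgd(0) and the paravirt layer is told the pd page is
 * going away before pmd_free() releases it, so no live pgd entry
 * ever points at a freed pmd page.
 */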
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
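/*
 * Concretely: a PAE pgd has PTRS_PER_PGD = 4 entries, of which
 * USER_PTRS_PER_PGD = 3 cover user space under the default 3G/1G
 * split. The i >= USER_PTRS_PER_PGD branch is the !SHARED_KERNEL_PMD
 * case, where the kernel pmd contents are copied from swapper_pg_dir
 * into the new mm's private kernel pmd.
 */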
#else	/* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
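/*
 * These stubs let pgd_alloc() and pgd_free() below stay free of
 * CONFIG_X86_PAE conditionals: prepopulation trivially succeeds and
 * there is never anything to mop up.
 */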
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */
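/*
 * Usage sketch (illustrative; the real caller is mm_init() in
 * kernel/fork.c, which looks roughly like this):
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return NULL;	// allocation or prepopulation failed
 *	...
 *	pgd_free(mm, mm->pgd);	// on teardown: mop up pmds, unhook, free
 */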