/*
 *  linux/arch/i386/mm/pgtable.c
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
		global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}
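
/*
 * Note: show_mem() is what backs the magic SysRq 'm' ("show memory")
 * handler, so everything above is a best-effort snapshot taken while
 * the system keeps running; the per-node scan holds only the pgdat
 * resize lock, not the zone locks, so counts may be slightly stale.
 */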
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
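
/*
 * The pgd -> pud -> pmd -> pte walk above is the generic four-level
 * form; on i386 the pud level (and, without PAE, the pmd level) is
 * folded away, so pud_offset()/pmd_offset() reduce to pointer casts
 * at compile time.
 */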
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
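
/*
 * Usage sketch (illustrative values, not a call site in this file):
 * with PAE, PMD_SIZE is 2MB and PTRS_PER_PTE is 512, so mapping a
 * 2MB-aligned region as one large page looks like
 *
 *	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);
 *
 * where vaddr is 2MB-aligned and pfn is a multiple of 512.
 */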
static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
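
/*
 * Illustration (hypothetical size): a hypervisor wanting the top 64MB
 * of virtual address space to itself would call
 *
 *	reserve_top_address(64 << 20);
 *
 * early in boot, before the first fixmap entry is created (hence the
 * BUG_ON above); both the fixmap and the vmalloc reservation then
 * shift down to stay out of the hole.
 */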
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}
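
/*
 * With CONFIG_HIGHPTE the pte page may come from highmem and thus has
 * no permanent kernel mapping; code must reach it through kmap_atomic()
 * (the KM_PTE0/KM_PTE1 slots) rather than page_address().
 */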
void pmd_ctor(struct kmem_cache *cache, void *pmd)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
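
/*
 * pmd_ctor() zeroes a whole pmd page. The pmd_cache it serves is
 * created elsewhere in arch init code (pgtable_cache_init() at the
 * time of writing) and is used here via pmd_cache_alloc() and
 * pmd_cache_free() below.
 */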
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
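
/*
 * The two helpers above thread pgd pages onto a hand-rolled doubly
 * linked list through struct page itself: page->index holds the next
 * pointer, while page_private() points back at the previous element's
 * next field (or at pgd_list itself for the head). That back-pointer
 * trick is why pgd_list_del() needs no special case for the head.
 */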
#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
#endif	/* PTRS_PER_PMD */
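
/*
 * Whichever variant is built, pgd_ctor runs only when a fresh page
 * first enters the pgd quicklist, not on every pgd_alloc(): pgds
 * recycled from the quicklist keep their kernel mappings, which is
 * what makes the quicklist worthwhile here.
 */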
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
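
/*
 * For scale (assuming the default 3G/1G user/kernel split): with PAE,
 * PTRS_PER_PGD is 4 and USER_PTRS_PER_PGD is 3, so an unshared kernel
 * pmd costs one extra page per process over the shared case.
 */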
/* If we allocate a pmd for part of the kernel address space, then
   make sure it is initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd. */
static pmd_t *pmd_cache_alloc(int idx)
{
	pmd_t *pmd;

	if (idx >= USER_PTRS_PER_PGD) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

		if (pmd)
			memcpy(pmd,
			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
			       sizeof(pmd_t) * PTRS_PER_PMD);
	} else
		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

	return pmd;
}
static void pmd_cache_free(pmd_t *pmd, int idx)
{
	if (idx >= USER_PTRS_PER_PGD)
		free_page((unsigned long)pmd);
	else
		kmem_cache_free(pmd_cache, pmd);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = pmd_cache_alloc(i);

		if (!pmd)
			goto out_oom;

		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--) {
		pgd_t pgdent = pgd[i];
		void* pmd = (void *)__va(pgd_val(pgdent)-1);
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		pmd_cache_free(pmd, i);
	}
	quicklist_free(0, pgd_dtor, pgd);
	return NULL;
}
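
/*
 * The "1 + __pa(pmd)" in pgd_alloc() sets bit 0 of the entry, which is
 * _PAGE_PRESENT; the error path above (and pgd_free() below) undoes it
 * with __va(pgd_val(pgdent) - 1) to recover the pmd's virtual address.
 */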
void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
			pgd_t pgdent = pgd[i];
			void* pmd = (void *)__va(pgd_val(pgdent)-1);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			pmd_cache_free(pmd, i);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	quicklist_free(0, pgd_dtor, pgd);
}
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}