/*
 *  linux/arch/i386/mm/pgtable.c
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		/* Hold off memory hotplug resizing while we scan this node. */
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* count only the users beyond the first */
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
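/*
 * Note: on non-PAE i386 both the pud and pmd levels are folded, so the
 * pud_offset()/pmd_offset() calls above compile away and the walk is
 * really a two-level pgd/pte lookup; under PAE only the pud is folded.
 */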
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
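/*
 * Illustrative usage sketch, not part of the original file: under PAE,
 * map one 2MB frame at a PMD-aligned kernel virtual address. The names
 * "vaddr_2m" and "pfn_base" are hypothetical; vaddr_2m must be aligned
 * to PMD_SIZE and pfn_base to PTRS_PER_PTE (i.e. 2MB physically):
 *
 *	set_pmd_pfn(vaddr_2m, pfn_base, PAGE_KERNEL_LARGE);
 *
 * This installs a single large-page mapping in place of 512 ptes.
 */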
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
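/*
 * Callers normally go through the set_fixmap()/set_fixmap_nocache()
 * wrappers from <asm/fixmap.h>, which supply PAGE_KERNEL or
 * PAGE_KERNEL_NOCACHE as the protection; e.g. the local APIC setup
 * does something like:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 */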
/* pte pages for kernel mappings are returned by pointer, so no highmem */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}
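/*
 * With CONFIG_HIGHPTE the pte page may live in highmem and then has no
 * permanent kernel mapping: users must access it via pte_offset_map()
 * (i.e. kmap_atomic) rather than dereferencing a direct pointer.
 */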
/* slab constructor: each pmd page starts out zeroed */
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
/*
 * List of all pgd's, needed for non-PAE so that pageattr.c can invalidate
 * entries in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/*
	 * Link the pgd's page at the head of pgd_list: page->index acts
	 * as the "next" pointer and page_private() as the "pprev" link.
	 */
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	/* unlink: point the previous "next" slot past us, fix next's pprev */
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
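/*
 * A minimal sketch (not from this file) of how a walker such as
 * pageattr.c traverses the list while holding pgd_lock; page->index is
 * the "next" pointer set up by pgd_list_add() above:
 *
 *	struct page *page;
 *
 *	for (page = pgd_list; page; page = (struct page *)page->index) {
 *		pgd_t *pgd = (pgd_t *)page_address(page);
 *		... update the kernel entries of this pgd ...
 *	}
 */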
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1) {
		/* non-PAE: clear the user entries, then join pgd_list */
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
	}

	/* copy the kernel portion of the reference page table */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	/* PAE: populate the user part of the pgd with fresh pmd pages */
	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
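/*
 * The "1 +" in __pgd(1 + __pa(pmd)) sets the low _PAGE_PRESENT bit of the
 * entry; since pmd pages are page-aligned, __pa(pmd) has its low bits
 * clear, so pgd_val(pgd[i]) - 1 (in the error path above and in pgd_free()
 * below) recovers the physical address, and __va() maps it back to the
 * pointer that kmem_cache_free() expects.
 */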
void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}