/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Debugging dump of overall memory state: free areas, swap, and
 * per-node page counts.  Walks every node's mem_map without locking.
 */
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat->node_mem_map + i;
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
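
/*
 * Illustrative note (not in the original): because <pfn,flags> is stored
 * as-is, passing an empty pgprot removes the mapping again.  The fixmap
 * code relies on this; asm-i386/fixmap.h defines roughly:
 *
 *      #define clear_fixmap(idx)  __set_fixmap(idx, 0, __pgprot(0))
 */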

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk("set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk("set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk("set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pmd = pmd_offset(pgd, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
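
/*
 * Hypothetical example (not in the original): under PAE, PMD_SIZE is 2MB
 * and PTRS_PER_PTE is 512, so a caller could map a single 2MB large page
 * with something like:
 *
 *      set_pmd_pfn(vaddr, pfn, __pgprot(__PAGE_KERNEL | _PAGE_PSE));
 *
 * where both vaddr and (pfn << PAGE_SHIFT) are 2MB-aligned; the exact
 * protection flags are an assumption for illustration.
 */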

/*
 * Install or update the kernel mapping for one fixmap slot.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
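
/*
 * Illustrative note (not in the original): callers normally go through the
 * wrapper macros in asm-i386/fixmap.h rather than calling this directly:
 *
 *      #define set_fixmap(idx, phys) \
 *              __set_fixmap(idx, phys, PAGE_KERNEL)
 *      #define set_fixmap_nocache(idx, phys) \
 *              __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 */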

/*
 * Allocate and zero one pte page for kernel page tables; these always
 * come from lowmem so the kernel can address them directly.
 */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                clear_page(pte);
        return pte;
}

/*
 * Allocate and zero one pte page for user page tables; with
 * CONFIG_HIGHPTE it may be placed in highmem.
 */
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
        if (pte)
                clear_highpage(pte);
        return pte;
}
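
/*
 * Illustrative note (not in the original): with CONFIG_HIGHPTE the pte page
 * may have no permanent kernel mapping, so generic code reaches it through
 * kmap_atomic(); asm-i386/pgtable.h defines pte_offset_map() roughly as:
 *
 *      ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(address))
 */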

/* Slab constructor: start every pmd page out fully cleared. */
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
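
/*
 * Illustrative note (not in the original): pmd_cache and pgd_cache are set
 * up at boot by pgtable_cache_init() in arch/i386/mm/init.c, roughly:
 *
 *      pgd_cache = kmem_cache_create("pgd",
 *                              PTRS_PER_PGD*sizeof(pgd_t),
 *                              PTRS_PER_PGD*sizeof(pgd_t),
 *                              0,
 *                              pgd_ctor,
 *                              PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
 *
 * so the dtor below is only registered in the non-PAE case.
 */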

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        /*
         * pgd_list is an intrusive doubly-linked list threaded through
         * struct page: ->index holds the next page, ->private points back
         * at the previous element's next field (or at pgd_list itself).
         */
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                pgd_list->private = (unsigned long)&page->index;
        pgd_list = page;
        page->private = (unsigned long)&pgd_list;
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        next = (struct page *)page->index;
        pprev = (struct page **)page->private;
        *pprev = next;
        if (next)
                next->private = (unsigned long)pprev;
}
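
/*
 * Illustrative note (not in the original): mm/pageattr.c is the consumer
 * of this list; when changing the attributes of a kernel page it walks
 * every cached pgd under pgd_lock, roughly:
 *
 *      for (page = pgd_list; page; page = (struct page *)page->index) {
 *              pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(address);
 *              set_pte_atomic((pte_t *)pmd_offset(pgd, address), pte);
 *      }
 */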

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
}
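
/*
 * Illustrative note (not in the original): the ctor runs whenever the slab
 * constructs a fresh pgd page, so anything handed out by pgd_cache already
 * has its kernel portion populated.  Non-PAE pgds also join pgd_list and
 * have their user slots cleared; PAE pgds return early because the kernel
 * pmd is shared and their user slots are overwritten by pgd_alloc() below.
 */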

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
                /* store the pmd's physical address with the present bit set */
                set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--)
                kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}
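
/*
 * Illustrative note (not in the original): in the PAE case each user pgd
 * slot holds __pgd(1 + __pa(pmd)), i.e. the pmd's physical address with
 * the present bit (bit 0) set.  Recovering the pmd's virtual address, as
 * the error path above and pgd_free() below do, is the inverse:
 *
 *      pmd_t *pmd = (pmd_t *)__va(pgd_val(pgd[i]) - 1);
 */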

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        /* in the non-PAE case, clear_page_tables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}
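
/*
 * Illustrative usage note (not in the original): pgd_alloc()/pgd_free()
 * are the arch hooks behind mm creation and teardown; kernel/fork.c calls
 * them roughly as:
 *
 *      mm->pgd = pgd_alloc(mm);        (mm_alloc_pgd, via mm_init)
 *      pgd_free(mm->pgd);              (mm_free_pgd, via __mmdrop)
 */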