[PATCH] zoned vm counters: conversion of nr_slab to per zone counter
[linux-2.6.22.y-op.git] / arch/i386/mm/pgtable.c
blob 73ac3599a0eac5d207caea768be6fd1e04f4fafd
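As the patch title says, this tree reads the slab page count through a per-zone VM counter rather than the old aggregated page_state field. A minimal before/after sketch of the affected line in show_mem() below, assuming the pre-conversion code pulled nr_slab out of get_page_state():

        /* before (assumed): aggregated counter filled in by get_page_state(&ps) */
        printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);

        /* after: zoned counter, summed across all zones by global_page_state() */
        printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));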
/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        struct page_state ps;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        get_page_state(&ps);
        printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
        printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
        printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        /* PTE pages may come from highmem when CONFIG_HIGHPTE is set */
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
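
/*
 * The pgd list is threaded through the pgd pages' struct pages:
 * page->index holds the next element, and page_private() points at
 * the field that points to this page (the previous element's index,
 * or pgd_list itself for the head), so deletion never walks the list.
 */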
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}
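
/*
 * Constructor run when a pgd comes out of pgd_cache: for non-PAE the
 * user half of the pgd is cleared, the kernel half is copied from
 * swapper_pg_dir and the pgd goes on pgd_list under pgd_lock; for PAE
 * only the kernel range is cloned, since the kernel pmd is shared.
 */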
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1) {
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
        }

        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
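
/*
 * With PAE, each user pgd entry gets its own pmd page from pmd_cache;
 * the low bit of the pgd entry is the present bit, hence the "+ 1"
 * when the entry is set and the "- 1" when the pmd is recovered for
 * freeing on the OOM path (and in pgd_free below).
 */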
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--)
                kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}