/*
 * linux/arch/i386/mm/pgtable.c
 */
5 #include <linux/config.h>
6 #include <linux/sched.h>
7 #include <linux/kernel.h>
8 #include <linux/errno.h>
10 #include <linux/swap.h>
11 #include <linux/smp.h>
12 #include <linux/highmem.h>
13 #include <linux/slab.h>
14 #include <linux/pagemap.h>
16 #include <asm/system.h>
17 #include <asm/pgtable.h>
18 #include <asm/pgalloc.h>
19 #include <asm/fixmap.h>
22 #include <asm/tlbflush.h>
26 int total
= 0, reserved
= 0;
27 int shared
= 0, cached
= 0;
33 printk("Mem-info:\n");
35 printk("Free swap: %6dkB\n",nr_swap_pages
<<(PAGE_SHIFT
-10));
36 for_each_pgdat(pgdat
) {
37 for (i
= 0; i
< pgdat
->node_spanned_pages
; ++i
) {
38 page
= pgdat
->node_mem_map
+ i
;
40 if (PageHighMem(page
))
42 if (PageReserved(page
))
44 else if (PageSwapCache(page
))
46 else if (page_count(page
))
47 shared
+= page_count(page
) - 1;
50 printk("%d pages of RAM\n", total
);
51 printk("%d pages of HIGHMEM\n",highmem
);
52 printk("%d reserved pages\n",reserved
);
53 printk("%d pages shared\n",shared
);
54 printk("%d pages swap cached\n",cached
);
58 * Associate a virtual page frame with a given physical page frame
59 * and protection flags for that frame.
61 static void set_pte_pfn(unsigned long vaddr
, unsigned long pfn
, pgprot_t flags
)
67 pgd
= swapper_pg_dir
+ pgd_index(vaddr
);
72 pmd
= pmd_offset(pgd
, vaddr
);
77 pte
= pte_offset_kernel(pmd
, vaddr
);
78 /* <pfn,flags> stored as-is, to permit clearing entries */
79 set_pte(pte
, pfn_pte(pfn
, flags
));
82 * It's enough to flush this one mapping.
83 * (PGE mappings get flushed as well)
85 __flush_tlb_one(vaddr
);
89 * Associate a large virtual page frame with a given physical page frame
90 * and protection flags for that frame. pfn is for the base of the page,
91 * vaddr is what the page gets mapped to - both must be properly aligned.
92 * The pmd must already be instantiated. Assumes PAE mode.
94 void set_pmd_pfn(unsigned long vaddr
, unsigned long pfn
, pgprot_t flags
)
99 if (vaddr
& (PMD_SIZE
-1)) { /* vaddr is misaligned */
100 printk ("set_pmd_pfn: vaddr misaligned\n");
103 if (pfn
& (PTRS_PER_PTE
-1)) { /* pfn is misaligned */
104 printk ("set_pmd_pfn: pfn misaligned\n");
107 pgd
= swapper_pg_dir
+ pgd_index(vaddr
);
108 if (pgd_none(*pgd
)) {
109 printk ("set_pmd_pfn: pgd_none\n");
112 pmd
= pmd_offset(pgd
, vaddr
);
113 set_pmd(pmd
, pfn_pmd(pfn
, flags
));
115 * It's enough to flush this one mapping.
116 * (PGE mappings get flushed as well)
118 __flush_tlb_one(vaddr
);
121 void __set_fixmap (enum fixed_addresses idx
, unsigned long phys
, pgprot_t flags
)
123 unsigned long address
= __fix_to_virt(idx
);
125 if (idx
>= __end_of_fixed_addresses
) {
129 set_pte_pfn(address
, phys
>> PAGE_SHIFT
, flags
);
132 pte_t
*pte_alloc_one_kernel(struct mm_struct
*mm
, unsigned long address
)
134 pte_t
*pte
= (pte_t
*)__get_free_page(GFP_KERNEL
|__GFP_REPEAT
);
140 struct page
*pte_alloc_one(struct mm_struct
*mm
, unsigned long address
)
144 #ifdef CONFIG_HIGHPTE
145 pte
= alloc_pages(GFP_KERNEL
|__GFP_HIGHMEM
|__GFP_REPEAT
, 0);
147 pte
= alloc_pages(GFP_KERNEL
|__GFP_REPEAT
, 0);
154 #ifdef CONFIG_X86_PAE
156 pgd_t
*pgd_alloc(struct mm_struct
*mm
)
159 pgd_t
*pgd
= kmem_cache_alloc(pae_pgd_cachep
, GFP_KERNEL
);
162 for (i
= 0; i
< USER_PTRS_PER_PGD
; i
++) {
163 unsigned long pmd
= __get_free_page(GFP_KERNEL
);
167 set_pgd(pgd
+ i
, __pgd(1 + __pa(pmd
)));
169 memcpy(pgd
+ USER_PTRS_PER_PGD
,
170 swapper_pg_dir
+ USER_PTRS_PER_PGD
,
171 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
175 for (i
--; i
>= 0; i
--)
176 free_page((unsigned long)__va(pgd_val(pgd
[i
])-1));
177 kmem_cache_free(pae_pgd_cachep
, pgd
);
181 void pgd_free(pgd_t
*pgd
)
185 for (i
= 0; i
< USER_PTRS_PER_PGD
; i
++)
186 free_page((unsigned long)__va(pgd_val(pgd
[i
])-1));
187 kmem_cache_free(pae_pgd_cachep
, pgd
);
192 pgd_t
*pgd_alloc(struct mm_struct
*mm
)
194 pgd_t
*pgd
= (pgd_t
*)__get_free_page(GFP_KERNEL
);
197 memset(pgd
, 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
198 memcpy(pgd
+ USER_PTRS_PER_PGD
,
199 swapper_pg_dir
+ USER_PTRS_PER_PGD
,
200 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
205 void pgd_free(pgd_t
*pgd
)
207 free_page((unsigned long)pgd
);
210 #endif /* CONFIG_X86_PAE */