/* $Id: init.c,v 1.17 2000-04-08 15:38:54+09 gniibe Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache;
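
/*
 * Layout note (an assumption, inferred from the set_asid() call in
 * paging_init() below): the low MMU_CONTEXT_ASID_MASK bits of
 * mmu_context_cache hold the hardware ASID, while the upper bits act
 * as a version counter so that stale contexts can be detected after
 * the ASID space wraps around.
 */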

static unsigned long totalram_pages;
static unsigned long totalhigh_pages;

extern unsigned long init_smp_mappings(unsigned long);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
unsigned long empty_bad_page[1024];
pte_t empty_bad_pte_table[PTRS_PER_PTE];
extern unsigned long empty_zero_page[1024];
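
/*
 * Fill empty_bad_pte_table with dirty PTEs that all point at
 * empty_bad_page, and return it so that a corrupt pmd can be aimed
 * at something harmless.
 */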
static pte_t * get_bad_pte_table(void)
{
	pte_t v;
	int i;

	v = pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED));

	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
		empty_bad_pte_table[i] = v;

	return empty_bad_pte_table;
}
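
/*
 * Called when a pmd entry turns out to be corrupt: report it via
 * pmd_ERROR() and repoint the pmd at the bad page table so the
 * kernel can keep running instead of dereferencing garbage.
 */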
void __handle_bad_pmd(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
}

void __handle_bad_pmd_kernel(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
}
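
/*
 * Slow path of kernel PTE allocation, used when the quicklist fast
 * path comes up empty. The page is allocated before the pmd is
 * re-checked, presumably so that a page table installed concurrently
 * by another CPU is noticed; in that case the fresh page is simply
 * freed again. On allocation failure the pmd is pointed at the bad
 * page table rather than being left empty.
 */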
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page(pte);
			set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
			return pte + offset;
		}
		set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
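
/*
 * Same as above, but for user page tables: the pmd is marked
 * _PAGE_TABLE rather than _KERNPG_TABLE.
 */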
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	unsigned long pte;

	pte = (unsigned long) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page((void *)pte);
			set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
			return (pte_t *)pte + offset;
		}
		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
		return NULL;
	}
	free_page(pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
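
/*
 * Trim the pgd/pmd/pte quicklists back down to 'low' cached pages
 * once they have grown past 'high'; returns the number of pages
 * handed back to the allocator.
 */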
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if (pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
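
/*
 * Debugging aid: dump the free areas and a per-page census of
 * mem_map. A page with use count N is counted as N-1 sharers.
 */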
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
}

/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

pgd_t swapper_pg_dir[1024];

/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
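
/*
 * Both macros read node 0's bootmem data: START_PFN is the first
 * page frame of RAM, MAX_LOW_PFN the end of low memory, both as
 * physical page frame numbers.
 */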

/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	int i;
	pgd_t * pg_dir;

	/* We don't need a kernel mapping as the hardware supports that. */
	pg_dir = swapper_pg_dir;

	for (i = 0; i < USER_PTRS_PER_PGD*2; i++)
		pgd_val(pg_dir[i]) = 0;

	/* Enable MMU */
	ctrl_outl(MMU_CONTROL_INIT, MMUCR);

	/* The manual suggests doing some nops after turning on the MMU */
	asm volatile("nop;nop;nop;nop;nop;nop;");

	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
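
	/*
	 * Compute the zone sizes and hand them to the boot-time
	 * allocator: pages below MAX_DMA_ADDRESS go into ZONE_DMA,
	 * the remainder into ZONE_NORMAL; ZONE_HIGHMEM stays empty.
	 */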
	{
		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
		unsigned long max_dma, low, start_pfn;

		start_pfn = START_PFN;
		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		low = MAX_LOW_PFN;

		if (low < max_dma)
			zones_size[ZONE_DMA] = low - start_pfn;
		else {
			zones_size[ZONE_DMA] = max_dma - start_pfn;
			zones_size[ZONE_NORMAL] = low - max_dma;
		}
		free_area_init_node(0, 0, 0, zones_size, __MEMORY_START, 0);
	}
}
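
/*
 * Account for all of low memory, release the bootmem bitmap to the
 * page allocator, and print the usual boot-time memory summary.
 */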
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	flush_page_to_ram(virt_to_page(empty_zero_page));

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map+tmp))
			reservedpages++;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
}
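
/*
 * Return the pages of the __init sections to the page allocator.
 * Each page has its Reserved bit cleared and its use count reset to
 * 1 first, so that free_page() actually releases it.
 */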
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		set_page_count(virt_to_page(p), 1);
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
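
/*
 * Fill in the numbers for sysinfo(2) and /proc/meminfo. Note that
 * totalhigh_pages is never incremented in this file, so the highmem
 * figures always read as zero here.
 */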
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;

	return;
}