/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/pagemap.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>

static unsigned long totalram_pages;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */

pgd_t swapper_pg_dir[512] __attribute__ ((__aligned__ (4096)));
unsigned long empty_bad_page[1024] __attribute__ ((__aligned__ (4096)));
unsigned long empty_zero_page[1024] __attribute__ ((__aligned__ (4096)));
pte_t empty_bad_pte_table[1024] __attribute__ ((__aligned__ (4096)));
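
/*
 * Probe a storage location with TPROT (TEST PROTECTION).  TPROT sets
 * the condition code instead of raising a protection exception, and
 * the __ex_table entry catches the addressing exception raised when
 * the storage does not exist at all.  The ssm pair loads the
 * system-mask byte at absolute address 0 around the probe (in this
 * kernel that byte is presumably 0, i.e. DAT and interrupts off) and
 * afterwards restores ssm_mask (0x07: DAT on, I/O and external
 * interrupts enabled).  The loop steps through four 1MB increments,
 * so a return value of 1 means the whole 4MB segment starting at loc
 * is backed by accessible storage.
 */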
static int test_access(unsigned long loc)
{
        static const int ssm_mask = 0x07000000L;
        int rc, i;

        rc = 0;
        for (i=0; i<4; i++) {
                __asm__ __volatile__(
                        "    slr   %0,%0\n"
                        "    ssm   %1\n"
                        "    tprot 0(%2),0\n"
                        "0:  jne   1f\n"
                        "    lhi   %0,1\n"
                        "1:  ssm   %3\n"
                        ".section __ex_table,\"a\"\n"
                        "   .align 4\n"
                        "   .long  0b,1b\n"
                        ".previous"
                        : "+&d" (rc) : "i" (0), "a" (loc), "m" (ssm_mask)
                        : "cc");
                if (rc == 0)
                        break;
                loc += 0x100000;
        }
        return rc;
}
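
/*
 * Return a page table in which every entry is a dirty, shared
 * mapping of the single empty_bad_page, implementing the BAD_PAGE
 * scheme described above: a process that faults when the system is
 * out of memory keeps running against the dummy page instead of
 * being killed in kernel mode.
 */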
static pte_t * get_bad_pte_table(void)
{
        pte_t v;
        int i;

        v = pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED));

        for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
                empty_bad_pte_table[i] = v;

        return empty_bad_pte_table;
}

static inline void invalidate_page(pte_t *pte)
{
        int i;

        for (i=0;i<PTRS_PER_PTE;i++)
                pte_clear(pte++);
}

void __handle_bad_pmd(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_val(*pmd) = _PAGE_TABLE + __pa(get_bad_pte_table());
}

void __handle_bad_pmd_kernel(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_val(*pmd) = _KERNPG_TABLE + __pa(get_bad_pte_table());
}
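
/*
 * A hardware page table on the 31-bit S/390 is only 1KB: 256 entries
 * of 4 bytes, mapping one 1MB segment.  The allocators below
 * therefore carve each freshly allocated 4KB page into four
 * consecutive hardware page tables and point four adjacent segment
 * table entries (pmd[0..3]) at its quarters, at offsets 0, 1024,
 * 2048 and 3072, so a single page allocation maps a 4MB block.
 */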
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
{
        pte_t *pte;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pmd_none(*pmd)) {
                if (pte) {
                        invalidate_page(pte);
                        pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(pte);
                        pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(pte)+1024;
                        pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(pte)+2048;
                        pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(pte)+3072;
                        return pte + offset;
                }
                pte = get_bad_pte_table();
                pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(pte);
                pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(pte)+1024;
                pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(pte)+2048;
                pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(pte)+3072;
                return NULL;
        }
        free_page((unsigned long)pte);
        if (pmd_bad(*pmd)) {
                __handle_bad_pmd_kernel(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + offset;
}

pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
        unsigned long pte;

        pte = (unsigned long) __get_free_page(GFP_KERNEL);
        if (pmd_none(*pmd)) {
                if (pte) {
                        invalidate_page((pte_t*) pte);
                        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
                        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte)+1024;
                        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte)+2048;
                        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte)+3072;
                        return (pte_t *) pte + offset;
                }
                pte = (unsigned long) get_bad_pte_table();
                pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
                pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte)+1024;
                pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte)+2048;
                pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte)+3072;
                return NULL;
        }
        free_page(pte);
        if (pmd_bad(*pmd)) {
                __handle_bad_pmd(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + offset;
}
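
/*
 * Trim the quicklists of cached pgd/pmd/pte pages: once the cache
 * has grown past the high watermark, free entries back to the page
 * allocator until it drops below the low watermark again.
 */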
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;
        if(pgtable_cache_size > high) {
                do {
                        if(pgd_quicklist)
                                free_pgd_slow(get_pgd_fast()), freed++;
                        if(pmd_quicklist)
                                free_pmd_slow(get_pmd_fast()), freed++;
                        if(pte_quicklist)
                                free_pte_slow(get_pte_fast()), freed++;
                } while(pgtable_cache_size > low);
        }
        return freed;
}

void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map+i))
                        shared += atomic_read(&mem_map[i].count) - 1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        printk("%d pages swap cached\n",cached);
        printk("%ld pages in page table cache\n",pgtable_cache_size);
        show_buffers();
}

/* References to section boundaries */

extern unsigned long _text;
extern unsigned long _etext;
extern unsigned long _edata;
extern unsigned long __bss_start;
extern unsigned long _end;

extern unsigned long __init_begin;
extern unsigned long __init_end;

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 * paging_init will erase this initial mapping
 */

unsigned long last_valid_pfn;
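
/*
 * Build the identity mapping of all physical memory in
 * swapper_pg_dir and load its segment-table designation into control
 * registers 1, 7 and 13 (the primary, secondary and home address
 * spaces), so all three spaces translate through the same tables
 * once DAT is enabled in the PSW.
 */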
void __init paging_init(void)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        pte_t   pte;
        int     i;
        unsigned long tmp;
        unsigned long address=0;
        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
        unsigned long end_mem = (unsigned long) __va(max_low_pfn*PAGE_SIZE);

        /* unmap whole virtual address space */

        pg_dir = swapper_pg_dir;

        for (i=0;i<KERNEL_PGD_PTRS;i++)
                pmd_clear((pmd_t*)pg_dir++);

        /*
         * map whole physical memory to virtual memory (identity mapping)
         */

        pg_dir = swapper_pg_dir;

        while (address < end_mem) {
                /*
                 * pg_table is physical at this point
                 */
                pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table));
                pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
                pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048));
                pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072));
                pg_dir++;

                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        pte = mk_pte_phys(address, PAGE_KERNEL);
                        if (address >= end_mem)
                                pte_clear(&pte);
                        set_pte(pg_table, pte);
                        address += PAGE_SIZE;
                }
        }

        /* enable virtual mapping in kernel mode */
        __asm__ __volatile__("    LCTL  1,1,%0\n"
                             "    LCTL  7,7,%0\n"
                             "    LCTL  13,13,%0"
                             : :"m" (pgdir_k));

        local_flush_tlb();

        {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};

                zones_size[ZONE_DMA] = max_low_pfn;
                free_area_init(zones_size);
        }
        return;
}
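
/*
 * mem_init() releases the bootmem pages to the buddy allocator and
 * then walks mem_map[] in 4MB steps, using test_access() to detect
 * segments that are not backed by real storage; every page of such a
 * hole is marked PG_reserved so it never reaches the free lists.
 */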
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        /* mark usable pages in the mem_map[] and count reserved pages */
        reservedpages = 0;
        tmp = 0;
        do {
                if (tmp && (tmp & 0x3ff) == 0 &&
                    test_access(tmp * PAGE_SIZE) == 0) {
                        printk("4M Segment %lX not available\n",
                               (unsigned long) tmp * PAGE_SIZE);
                        do {
                                set_bit(PG_reserved, &mem_map[tmp].flags);
                                reservedpages++;
                                tmp++;
                        } while (tmp < max_low_pfn && (tmp & 0x3ff));
                } else {
                        if (PageReserved(mem_map+tmp))
                                reservedpages++;
                        tmp++;
                }
        } while (tmp < max_low_pfn);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);
}

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        /* compute the size in bytes, not in unsigned longs */
        printk ("Freeing unused kernel memory: %ldk freed\n",
                ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

void si_meminfo(struct sysinfo *val)
{
        val->totalram = totalram_pages;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->mem_unit = PAGE_SIZE;
        return;
}