/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
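
/*
 * Global MM objects for the kernel: the per-CPU mmu_gather used to batch
 * TLB flushes, the initial kernel page table (swapper_pg_dir) and the
 * page-aligned all-zero page that backs read faults on untouched
 * anonymous memory.
 */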
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char  empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
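
/*
 * diag10() issues the CP diagnose 0x10 "release pages" call, telling the
 * z/VM hypervisor that the page at addr is no longer needed.  The diagnose
 * handles 31-bit addressable storage only, hence the address guard and the
 * temporary sam31/sam64 addressing-mode switch on 64-bit kernels; the
 * EX_TABLE entry lets the call fail silently (e.g. when not running
 * under z/VM).
 */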
void diag10(unsigned long addr)
{
        if (addr >= 0x7ff00000)
                return;
        asm volatile(
#ifdef CONFIG_64BIT
                "       sam31\n"
                "       diag    %0,%0,0x10\n"
                "0:     sam64\n"
#else
                "       diag    %0,%0,0x10\n"
                "0:\n"
#endif
                EX_TABLE(0b,0b)
                : : "a" (addr));
}
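
/*
 * show_mem() prints a summary of memory usage (e.g. for SysRq-m): it walks
 * every valid page frame, counting reserved, swap-cache and shared pages,
 * then dumps the global dirty/writeback/mapped/slab/pagetable counters.
 */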
void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        i = max_mapnr;
        while (i-- > 0) {
                if (!pfn_valid(i))
                        continue;
                page = pfn_to_page(i);
                total++;
                if (PageReserved(page))
                        reserved++;
                else if (PageSwapCache(page))
                        cached++;
                else if (page_count(page))
                        shared += page_count(page) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);

        printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
        printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk("%lu pages slab\n",
               global_page_state(NR_SLAB_RECLAIMABLE) +
               global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
}
static void __init setup_ro_region(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        pte_t new_pte;
        unsigned long address, end;

        address = ((unsigned long)&_stext) & PAGE_MASK;
        end = PFN_ALIGN((unsigned long)&_eshared);

        for (; address < end; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                pmd = pmd_offset(pgd, address);
                pte = pte_offset_kernel(pmd, address);
                new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
                *pte = new_pte;
        }
}
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        pgd_t *pg_dir;
        int i;
        unsigned long pgdir_k;
        static const int ssm_mask = 0x04000000L;
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        pg_dir = swapper_pg_dir;

#ifdef CONFIG_64BIT
        pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
        for (i = 0; i < PTRS_PER_PGD; i++)
                pgd_clear_kernel(pg_dir + i);
#else
        pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
        for (i = 0; i < PTRS_PER_PGD; i++)
                pmd_clear_kernel((pmd_t *)(pg_dir + i));
#endif
        vmem_map_init();
        setup_ro_region();

        S390_lowcore.kernel_asce = pgdir_k;

        /*
         * enable virtual mapping in kernel mode: control registers 1, 7
         * and 13 hold the primary, secondary and home space ASCEs; point
         * all three at the kernel page table, then switch DAT on via the
         * ssm mask (0x04 in the PSW system-mask byte is the DAT bit).
         */
        __ctl_load(pgdir_k, 1, 1);
        __ctl_load(pgdir_k, 7, 7);
        __ctl_load(pgdir_k, 13, 13);
        __raw_local_irq_ssm(ssm_mask);

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}
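
/*
 * mem_init() hands the bootmem allocator's remaining free pages over to
 * the buddy allocator, sets up max_mapnr/high_memory and prints the
 * boot-time memory banner.
 */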
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);
        printk("Write protected kernel read-only data: %#lx - %#lx\n",
               (unsigned long)&_stext,
               PFN_ALIGN((unsigned long)&_eshared) - 1);
}
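
/*
 * free_initmem() returns the pages of the __init sections to the page
 * allocator once boot is complete, poisoning them first so that stale
 * references to init code or data are easier to spot.
 */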
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}
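
/*
 * free_initrd_mem() does the same for the initrd image once it has been
 * unpacked or is no longer needed.
 */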
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif