/* $Id: init.c,v 1.69 1999/09/06 22:56:17 ecd Exp $
 * linux/arch/sparc/mm/init.c
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/init.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>

/* Turn this off if you suspect some place in some physical memory hole
   might get into page tables (something would be broken very much). */
#define FREE_UNUSED_MEM_MAP

extern void show_net_buffers(void);
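
/*
 * One bit in this bitmap covers 256 pages of physical address space;
 * a set bit means that chunk contains at least one valid page (see the
 * MAP_NR(addr) >> 8 set_bit() calls below).
 */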
unsigned long *sparc_valid_addr_bitmap;

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
unsigned long sparc_unmapped_base;

struct pgtable_cache_struct pgt_quicklists;

/* References to section boundaries */
extern char __init_begin, __init_end, etext;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	struct page *page, *end;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
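	/*
	 * On sparc the mem_map[] array can have holes; a page struct marked
	 * PageSkip uses its next_hash pointer to name the next valid entry,
	 * so walkers hop over the gap instead of touching bogus entries.
	 */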
	for (page = mem_map, end = mem_map + max_mapnr;
	     page < end; page++) {
		if (PageSkip(page)) {
			if (page->next_hash < page)
				break;
			page = page->next_hash;
		}
		total++;
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
			cached++;
		else if (!atomic_read(&page->count))
			free++;
		else
			shared += atomic_read(&page->count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld page tables cached\n", pgtable_cache_size);
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld page dirs cached\n", pgd_cache_size);
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

extern pgprot_t protection_map[16];
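
/*
 * sparc_context_init() carves the MMU context descriptor pool out of
 * start_mem, numbers each context, and strings them all onto the free
 * list; the caller gets back start_mem advanced past the pool.
 */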
unsigned long __init sparc_context_init(unsigned long start_mem, int numctx)
{
	int ctx;

	ctx_list_pool = (struct ctx_list *) start_mem;
	start_mem += (numctx * sizeof(struct ctx_list));
	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = 0;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
	return start_mem;
}

/*
 * paging_init() sets up the page tables: we call the MMU-specific
 * init routine based upon the Sun model type on the Sparc.
 */
extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
extern unsigned long device_scan(unsigned long);

unsigned long __init
paging_init(unsigned long start_mem, unsigned long end_mem)
{
	switch (sparc_cpu_model) {
	case sun4c:
	case sun4e:
	case sun4:
		start_mem = sun4c_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0xe0000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
		break;
	case sun4m:
	case sun4d:
		start_mem = srmmu_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;
	case ap1000:
#if CONFIG_AP1000
		start_mem = apmmu_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;
#endif
	default:
		prom_printf("paging_init: Cannot init paging on this Sparc\n");
		prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
		prom_printf("paging_init: Halting...\n");
		prom_halt();
	}

	/* Initialize the protection map with non-constant, MMU dependent values. */
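	/*
	 * The index is the low four vm_flags bits of a vma (the generic
	 * encoding of this era: bit 0 = VM_READ, bit 1 = VM_WRITE,
	 * bit 2 = VM_EXEC, bit 3 = VM_SHARED).  E.g. a PROT_READ,
	 * MAP_PRIVATE mmap() lands on protection_map[1] = PAGE_READONLY,
	 * while the same mapping with MAP_SHARED indexes entry 9.
	 */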
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
	btfixup();
	return device_scan(start_mem);
}

struct cache_palias *sparc_aliases;

extern void srmmu_frob_mem_map(unsigned long);

int physmem_mapped_contig __initdata = 1;
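
/*
 * taint_real_pages() walks the pages between PAGE_OFFSET and end_mem,
 * clears PG_reserved on every page that falls inside a real physical
 * memory bank (sp_banks[]) and records it in sparc_valid_addr_bitmap,
 * so pages sitting in physical holes stay reserved and are never
 * handed out.
 */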
static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long addr, tmp2 = 0;

	if (physmem_mapped_contig) {
		for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
			if (addr >= KERNBASE && addr < start_mem)
				addr = start_mem;
			for (tmp2 = 0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
				unsigned long phys_addr = (addr - PAGE_OFFSET);
				unsigned long base = sp_banks[tmp2].base_addr;
				unsigned long limit = base + sp_banks[tmp2].num_bytes;

				if ((phys_addr >= base) && (phys_addr < limit) &&
				    ((phys_addr + PAGE_SIZE) < limit)) {
					mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
					set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
				}
			}
		}
	} else {
		if ((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) {
			srmmu_frob_mem_map(start_mem);
		} else {
			for (addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
				mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
				set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
			}
		}
	}
}

void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int i;
	unsigned long addr;
	struct page *page, *end;

	/* Saves us work later. */
	memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE);

	end_mem &= PAGE_MASK;
	max_mapnr = MAP_NR(end_mem);
	high_memory = (void *) end_mem;
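
	/*
	 * Size the valid-address bitmap: one bit per 256 pages means
	 * max_mapnr >> 8 bits, i.e. max_mapnr >> (8 + 5) 32-bit words
	 * (rounded up below), occupying i << 2 bytes.
	 */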
	sparc_valid_addr_bitmap = (unsigned long *)start_mem;
	i = max_mapnr >> (8 + 5);
	i += 1;
	memset(sparc_valid_addr_bitmap, 0, i << 2);
	start_mem += i << 2;

	start_mem = PAGE_ALIGN(start_mem);
	num_physpages = 0;

	addr = KERNBASE;
	while (addr < start_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
			mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		else
#endif
			mem_map[MAP_NR(addr)].flags |= (1 << PG_reserved);
		set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages(start_mem, end_mem);

#ifdef FREE_UNUSED_MEM_MAP
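	/*
	 * The struct page entries describing pages inside physical holes
	 * are themselves wasted memory.  Clear PG_reserved on the pages
	 * of mem_map[] lying between a PageSkip marker and the region its
	 * next_hash points at, so the free loop below can return that
	 * memory to the free pool.
	 */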
	end = mem_map + max_mapnr;
	for (page = mem_map; page < end; page++) {
		if (PageSkip(page)) {
			unsigned long low, high;

			/* See srmmu_frob_mem_map() for why this is done. -DaveM */
			page++;

			low = PAGE_ALIGN((unsigned long)(page + 1));
			if (page->next_hash < page)
				high = ((unsigned long)end) & PAGE_MASK;
			else
				high = ((unsigned long)page->next_hash) & PAGE_MASK;
			while (low < high) {
				mem_map[MAP_NR(low)].flags &= ~(1 << PG_reserved);
				low += PAGE_SIZE;
			}
		}
	}
#endif

	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if (PageSkip(mem_map + MAP_NR(addr))) {
			unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;

			next = (next << PAGE_SHIFT) + PAGE_OFFSET;
			if (next < addr || next >= end_mem)
				break;
			addr = next;
		}
		num_physpages++;
		if (PageReserved(mem_map + MAP_NR(addr))) {
			if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
				codepages++;
			else if ((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
				initpages++;
			else if ((addr < start_mem) && (addr >= KERNBASE))
				datapages++;
			continue;
		}
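		/*
		 * Usable page: give it a reference count of one, then let
		 * free_page() drop that reference and hand the page to the
		 * allocator.  Initrd pages are left alone until the ramdisk
		 * is done with them.
		 */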
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    (addr < initrd_start || addr >= initrd_end))
#endif
			free_page(addr);
	}

	printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       nr_free_pages << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       (unsigned long)PAGE_OFFSET, end_mem);

	/* NOTE NOTE NOTE NOTE
	 * Please keep track of things and make sure this
	 * always matches the code in mm/page_alloc.c. -DaveM
	 */
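	/*
	 * Example with 4K pages: 64MB free is 16384 pages, so i = 128 and
	 * min/low/high come out as 128/256/384 pages (512kB/1MB/1.5MB);
	 * the clamp keeps min between 48 and 256 pages.
	 */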
	i = nr_free_pages >> 7;
	if (i < 48)
		i = 48;
	if (i > 256)
		i = 256;
	freepages.min = i;
	freepages.low = i << 1;
	freepages.high = freepages.low + i;
}
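
/*
 * free_initmem() runs once booting is complete: it un-reserves the
 * pages holding __init code and data and feeds them back to the page
 * allocator.
 */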
void free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
		free_page(addr);
	}
}
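
/*
 * si_meminfo() fills in the counters reported by sysinfo(2), walking
 * mem_map with the same PageSkip hole-hopping as show_mem() above and
 * counting each extra reference on a page as a shared page.
 */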
void si_meminfo(struct sysinfo *val)
{
	struct page *page, *end;

	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = atomic_read(&buffermem);
	for (page = mem_map, end = mem_map + max_mapnr;
	     page < end; page++) {
		if (PageSkip(page)) {
			if (page->next_hash < page)
				break;
			page = page->next_hash;
		}
		if (PageReserved(page))
			continue;
		val->totalram++;
		if (!atomic_read(&page->count))
			continue;
		val->sharedram += atomic_read(&page->count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	val->totalbig = 0;
	val->freebig = 0;
}