/* $Id: init.c,v 1.73 2000/01/15 00:51:26 anton Exp $
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Copyright (C) 2000 Anton Blanchard (anton@progsoc.uts.edu.au)
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
extern void show_net_buffers(void);

unsigned long *sparc_valid_addr_bitmap;

unsigned long phys_base;

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
unsigned long sparc_unmapped_base;

struct pgtable_cache_struct pgt_quicklists;

/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext, edata;

static unsigned long totalram_pages = 0;
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
	memset((void *) &empty_bad_page_table, 0, PAGE_SIZE);
	return (pte_t *) &empty_bad_page_table;
}
pte_t __bad_page(void)
{
	memset((void *) &empty_bad_page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte_phys((unsigned long)__pa(&empty_bad_page) + phys_base,
				       PAGE_SHARED));
}
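/* Editor's note (assumption, not in the original): on this sparc32 tree
 * __pa() only subtracts PAGE_OFFSET, while the kernel may be loaded at a
 * non-zero physical base; adding phys_base back in is what produces the
 * true physical address that mk_pte_phys() expects here.
 */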
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", totalram_pages);
	printk("%d free pages\n", nr_free_pages());
	printk("%ld pages in page table cache\n", pgtable_cache_size);
#ifndef __SMP__
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld entries in page dir cache\n", pgd_cache_size);
#endif
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}
extern pgprot_t protection_map[16];
void __init sparc_context_init(int numctx)
{
	int ctx;

	ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = 0;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
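/* Editor's note (illustration, not in the original): after the function
 * above runs, ctx_free is a circular list holding all numctx entries of
 * ctx_list_pool and ctx_used is empty.  On sun4c numctx is typically the
 * small number of hardware MMU contexts (often 8 or 16); the MMU-specific
 * code later moves an entry to the used list when it hands that hardware
 * context to an mm.
 */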
#define DEBUG_BOOTMEM

extern unsigned long cmdline_memory_size;
unsigned long __init bootmem_init(void)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	int i;

	/* Limit maximum memory until we implement highmem for sparc */
	if (cmdline_memory_size > 0x9000000)
		cmdline_memory_size = 0x9000000;

	/* XXX It is a bit ambiguous here, whether we should
	 * XXX treat the user specified mem=xxx as total wanted
	 * XXX physical memory, or as a limit to the upper
	 * XXX physical address we allow.  For now it is the
	 * XXX latter. -DaveM
	 */
#ifdef DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (end_of_phys_memory > cmdline_memory_size) {
				if (cmdline_memory_size < sp_banks[i].base_addr) {
					end_of_phys_memory =
						sp_banks[i-1].base_addr +
						sp_banks[i-1].num_bytes;
					sp_banks[i].base_addr = 0xdeadbeef;
					sp_banks[i].num_bytes = 0;
				} else {
					sp_banks[i].num_bytes -=
						(end_of_phys_memory -
						 cmdline_memory_size);
					end_of_phys_memory = cmdline_memory_size;
					sp_banks[++i].base_addr = 0xdeadbeef;
					sp_banks[i].num_bytes = 0;
				}
				break;
			}
		}
	}

	/* Start with page aligned address of last symbol in kernel
	 * image.
	 */
	start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));

	/* Adjust up to the physical address where the kernel begins. */
	start_pfn += phys_base;

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

	/* Initialize the boot-time allocator. */
#ifdef DEBUG_BOOTMEM
	prom_printf("init_bootmem(spfn[%lx],epfn[%lx])\n",
		    start_pfn, end_pfn);
#endif
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef DEBUG_BOOTMEM
		prom_printf("free_bootmem: base[%lx] size[%lx]\n",
			    sp_banks[i].base_addr,
			    sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr,
			     sp_banks[i].num_bytes);
	}

	/* Reserve the kernel text/data/bss and the bootmem bitmap. */
#ifdef DEBUG_BOOTMEM
	prom_printf("reserve_bootmem: base[%lx] size[%lx]\n",
		    phys_base,
		    (((start_pfn << PAGE_SHIFT) +
		      bootmap_size) - phys_base));
#endif
	reserve_bootmem(phys_base, (((start_pfn << PAGE_SHIFT) +
				     bootmap_size) - phys_base));

#ifdef DEBUG_BOOTMEM
	prom_printf("init_bootmem: return end_pfn[%lx]\n", end_pfn);
#endif
	return end_pfn;
}
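/* Editor's note (worked example, not in the original): suppose two banks,
 * { base 0x0000000, 32MB } and { base 0x8000000, 32MB }.  With mem=16M
 * (cmdline_memory_size == 0x1000000) the scan hits the limit inside bank
 * 0: its num_bytes shrinks to 16MB, end_of_phys_memory becomes 0x1000000,
 * and bank 1 becomes the terminator.  With mem=48M (0x3000000) the limit
 * falls in the hole below bank 1's base, so bank 1 is dropped whole and
 * end_of_phys_memory reverts to the end of bank 0.
 */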
/*
 * paging_init() sets up the page tables: We call the MMU specific
 * init routine based upon the Sun model type on the Sparc.
 */
extern void sun4c_paging_init(void);
extern void srmmu_paging_init(void);
extern void device_scan(void);

unsigned long last_valid_pfn;
void __init paging_init(void)
{
	switch(sparc_cpu_model) {
	case sun4c:
	case sun4e:
	case sun4:
		sun4c_paging_init();
		sparc_unmapped_base = 0xe0000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
		break;
	case sun4m:
	case sun4d:
		srmmu_paging_init();
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;

	case ap1000:
#if CONFIG_AP1000
		apmmu_paging_init();
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;
#endif

	default:
		prom_printf("paging_init: Cannot init paging on this Sparc\n");
		prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
		prom_printf("paging_init: Halting...\n");
		prom_halt();
	}

	/* Initialize the protection map with non-constant, MMU dependent values. */
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
	btfixup();
	device_scan();
}
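/* Editor's note (assumption, not in the original): protection_map is
 * indexed by the low four VM_* flag bits of a vma: bit 0 = VM_READ,
 * bit 1 = VM_WRITE, bit 2 = VM_EXEC, bit 3 = VM_SHARED.  Entries 0-7 are
 * private mappings, where writable means PAGE_COPY so the first store
 * faults and triggers copy-on-write; entries 8-15 are shared mappings,
 * where writable gets PAGE_SHARED outright.
 */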
struct cache_palias *sparc_aliases;
static void __init taint_real_pages(void)
{
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++) {
		unsigned long start, end;

		start = sp_banks[i].base_addr;
		end = start + sp_banks[i].num_bytes;

		while (start < end) {
			set_bit(start >> 20, sparc_valid_addr_bitmap);
			start += PAGE_SIZE;
		}
	}
}
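/* Editor's note (assumption, not in the original): the shift by 20 above
 * means sparc_valid_addr_bitmap holds one bit per megabyte of physical
 * address space, marking which regions are backed by real RAM; the
 * matching allocation in mem_init() sizes the bitmap the same way.
 */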
void __init free_mem_map_range(struct page *first, struct page *last)
{
	first = (struct page *) PAGE_ALIGN((unsigned long)first);
	last = (struct page *) ((unsigned long)last & PAGE_MASK);
#ifdef DEBUG_BOOTMEM
	prom_printf("[%p,%p] ", first, last);
#endif
	while (first < last) {
		ClearPageReserved(mem_map + MAP_NR(first));
		set_page_count(mem_map + MAP_NR(first), 1);
		free_page((unsigned long)first);
		totalram_pages++;
		num_physpages++;

		first = (struct page *)((unsigned long)first + PAGE_SIZE);
	}
}
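/* Editor's note (assumption, not in the original): first and last are
 * addresses *within* the mem_map[] array itself, so the loop above frees
 * the pages that back unused struct page entries, not the memory those
 * entries would describe.  Hence the PAGE_ALIGN/PAGE_MASK trimming: only
 * wholly-unused mem_map pages may be freed.
 */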
/* Walk through holes in the sp_banks regions; if the mem_map array
 * areas representing those holes consume a page or more, free
 * up such pages.  This helps a lot on machines where physical
 * ram is configured such that it begins at some huge value.
 *
 * The sp_banks array is sorted by base address.
 */
void __init free_unused_mem_map(void)
{
	int i;

#ifdef DEBUG_BOOTMEM
	prom_printf("free_unused_mem_map: ");
#endif
	for (i = 0; sp_banks[i].num_bytes; i++) {
		if (i == 0) {
			struct page *first, *last;

			first = mem_map;
			last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];
			free_mem_map_range(first, last);
		} else {
			struct page *first, *last;
			unsigned long prev_end;

			prev_end = sp_banks[i-1].base_addr +
				sp_banks[i-1].num_bytes;
			prev_end = PAGE_ALIGN(prev_end);
			first = &mem_map[prev_end >> PAGE_SHIFT];
			last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];

			free_mem_map_range(first, last);

			if (!sp_banks[i+1].num_bytes) {
				prev_end = sp_banks[i].base_addr +
					sp_banks[i].num_bytes;
				first = &mem_map[prev_end >> PAGE_SHIFT];
				last = &mem_map[last_valid_pfn];
				free_mem_map_range(first, last);
			}
		}
	}
#ifdef DEBUG_BOOTMEM
	prom_printf("\n");
#endif
}
void __init mem_init(void)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int i;
	unsigned long addr, last;

	/* Saves us work later. */
	memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE);

	i = last_valid_pfn >> (8 + 5);
	i += 1;

	sparc_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);

	if (sparc_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc_valid_addr_bitmap, 0, i << 2);
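	/* Editor's note (assumption, not in the original): the sizing above
	 * follows from one bitmap bit per megabyte: last_valid_pfn >> 8
	 * converts a count of 4K pages to a count of megabytes (bits
	 * needed), the further >> 5 converts bits to 32-bit words, and
	 * i << 2 converts words to bytes for the allocation and memset.
	 */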
	/* fix this */
#ifdef CONFIG_BLK_DEV_INITRD
	addr = __va(phys_base);
	last = PAGE_ALIGN((unsigned long)&_end) + phys_base;
	while (addr < last) {
		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
		addr += PAGE_SIZE;
	}
#endif
	taint_real_pages();

	max_mapnr = last_valid_pfn;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	num_physpages = totalram_pages = free_all_bootmem();

#if 0
	free_unused_mem_map();
#endif
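	/* Editor's note: the call above is compiled out; free_unused_mem_map()
	 * is defined earlier in this file but is currently disabled under
	 * #if 0, so holes in mem_map are not reclaimed on this kernel.
	 */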
	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       (unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
	/* NOTE NOTE NOTE NOTE
	 * Please keep track of things and make sure this
	 * always matches the code in mm/page_alloc.c -DaveM
	 */
	i = nr_free_pages() >> 7;
	if (i < 48)
		i = 48;
	if (i > 256)
		i = 256;
	freepages.min = i;
	freepages.low = i << 1;
	freepages.high = freepages.low + i;
}
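/* Editor's note (assumption, not in the original): the thresholds above
 * seed the kswapd watermarks: min is roughly 1/128 of free memory,
 * clamped to [48, 256] pages, low is twice min, and high is three times
 * min.  The page allocator starts background reclaim as free memory
 * falls through these levels, per the mm/page_alloc.c note above.
 */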
void free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = addr + phys_base;
		p = mem_map + MAP_NR(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		totalram_pages++;
		num_physpages++;
	}
}
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);

	val->totalhigh = 0;
	val->freehigh = 0;

	val->mem_unit = PAGE_SIZE;
}