/*  $Id: init.c,v 1.69 1999/09/06 22:56:17 ecd Exp $
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/init.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>

/* Turn this off if you suspect some place in some physical memory hole
   might get into page tables (something would be broken very much). */
#define FREE_UNUSED_MEM_MAP

extern void show_net_buffers(void);

unsigned long *sparc_valid_addr_bitmap;

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
unsigned long sparc_unmapped_base;

struct pgtable_cache_struct pgt_quicklists;

/* References to section boundaries */
extern char __init_begin, __init_end, etext;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}
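
/*
 * Dump a summary of memory usage to the console: totals for free,
 * reserved, shared and swap-cached pages plus the sizes of the
 * page-table quicklists.  The mem_map walk honours PageSkip markers
 * so holes between physical memory banks are stepped over rather
 * than counted.
 */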
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	struct page *page, *end;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	for (page = mem_map, end = mem_map + max_mapnr;
	     page < end; page++) {
		if (PageSkip(page)) {
			if (page->next_hash < page)
				break;
			page = page->next_hash;
		}
		total++;
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
			cached++;
		else if (!atomic_read(&page->count))
			free++;
		else
			shared += atomic_read(&page->count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld page tables cached\n", pgtable_cache_size);
	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
		printk("%ld page dirs cached\n", pgd_cache_size);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

extern pgprot_t protection_map[16];
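
/*
 * Carve the MMU context descriptor pool out of start_mem: one
 * ctx_list entry per hardware context, all of them numbered and
 * threaded onto the free list.  Returns the updated start_mem.
 */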
unsigned long __init sparc_context_init(unsigned long start_mem, int numctx)
{
	int ctx;

	ctx_list_pool = (struct ctx_list *) start_mem;
	start_mem += (numctx * sizeof(struct ctx_list));
	for(ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = 0;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for(ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
	return start_mem;
}

/*
 * paging_init() sets up the page tables: We call the MMU specific
 * init routine based upon the Sun model type on the Sparc.
 */
extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
extern unsigned long device_scan(unsigned long);

unsigned long __init
paging_init(unsigned long start_mem, unsigned long end_mem)
{
	switch(sparc_cpu_model) {
	case sun4c:
	case sun4e:
	case sun4:
		start_mem = sun4c_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0xe0000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
		break;
	case sun4m:
	case sun4d:
		start_mem = srmmu_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;
#ifdef CONFIG_AP1000
	case ap1000:
		start_mem = apmmu_paging_init(start_mem, end_mem);
		sparc_unmapped_base = 0x50000000;
		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
		break;
#endif
	default:
		prom_printf("paging_init: Cannot init paging on this Sparc\n");
		prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
		prom_printf("paging_init: Halting...\n");
		prom_halt();
	};

	/* Initialize the protection map with non-constant, MMU dependent values. */
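	/*
	 * protection_map is indexed by the low four bits of vm_flags
	 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED): private writable
	 * mappings get PAGE_COPY (copy-on-write), shared writable
	 * mappings get PAGE_SHARED.
	 */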
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;

	return device_scan(start_mem);
}

struct cache_palias *sparc_aliases;

extern void srmmu_frob_mem_map(unsigned long);

int physmem_mapped_contig __initdata = 1;
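
/*
 * Clear PG_reserved on every page that is backed by real physical
 * memory (as described by sp_banks[]) and record it in
 * sparc_valid_addr_bitmap; pages sitting in physical holes stay
 * reserved so they can never reach the page allocator.
 */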
static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long addr, tmp2 = 0;

	if(physmem_mapped_contig) {
		for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
			if(addr >= KERNBASE && addr < start_mem)
				addr = start_mem;
			for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
				unsigned long phys_addr = (addr - PAGE_OFFSET);
				unsigned long base = sp_banks[tmp2].base_addr;
				unsigned long limit = base + sp_banks[tmp2].num_bytes;

				if((phys_addr >= base) && (phys_addr < limit) &&
				   ((phys_addr + PAGE_SIZE) < limit)) {
					mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
					set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
				}
			}
		}
	} else {
		if((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) {
			srmmu_frob_mem_map(start_mem);
		} else {
			for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
				mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
				set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
			}
		}
	}
}
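
/*
 * mem_init() finishes bootstrap memory setup: it establishes
 * max_mapnr and high_memory, carves the valid-address bitmap out of
 * start_mem, keeps the kernel image reserved, releases every usable
 * page to the allocator and prints the memory summary.
 */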
void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	int i;
	unsigned long addr;
	struct page *page, *end;

	/* Saves us work later. */
	memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE);

	end_mem &= PAGE_MASK;
	max_mapnr = MAP_NR(end_mem);
	high_memory = (void *) end_mem;
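
	/*
	 * The valid-address bitmap holds one bit per 256 pages
	 * (set_bit() is fed MAP_NR(addr) >> 8), so max_mapnr >> (8 + 5)
	 * 32-bit words are needed; it lives at start_mem and is zeroed
	 * before the memory banks are scanned.
	 */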
	sparc_valid_addr_bitmap = (unsigned long *)start_mem;
	i = max_mapnr >> (8 + 5);
	i += 1;
	memset(sparc_valid_addr_bitmap, 0, i << 2);
	start_mem += i << 2;

	start_mem = PAGE_ALIGN(start_mem);

	addr = KERNBASE;
	while(addr < start_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
		else
#endif
			mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
		set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages(start_mem, end_mem);

#ifdef FREE_UNUSED_MEM_MAP
	end = mem_map + max_mapnr;
	for (page = mem_map; page < end; page++) {
		if (PageSkip(page)) {
			unsigned long low, high;

			/* See srmmu_frob_mem_map() for why this is done. -DaveM */
			page++;

			low = PAGE_ALIGN((unsigned long)(page+1));
			if (page->next_hash < page)
				high = ((unsigned long)end) & PAGE_MASK;
			else
				high = ((unsigned long)page->next_hash) & PAGE_MASK;
			while (low < high) {
				mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved);
				low += PAGE_SIZE;
			}
		}
	}
#endif

	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if (PageSkip(mem_map + MAP_NR(addr))) {
			unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;

			next = (next << PAGE_SHIFT) + PAGE_OFFSET;
			if (next < addr || next >= end_mem)
				break;
			addr = next;
		}

		if(PageReserved(mem_map + MAP_NR(addr))) {
			if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
				codepages++;
			else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
				initpages++;
			else if((addr < start_mem) && (addr >= KERNBASE))
				datapages++;
			continue;
		}
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    (addr < initrd_start || addr >= initrd_end))
#endif
			free_page(addr);
	}

	printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       nr_free_pages << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       (unsigned long)PAGE_OFFSET, end_mem);

	/* NOTE NOTE NOTE NOTE
	 * Please keep track of things and make sure this
	 * always matches the code in mm/page_alloc.c -DaveM
	 */
	i = nr_free_pages >> 7;
	freepages.min = i;
	freepages.low = i << 1;
	freepages.high = freepages.low + i;
}
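
/*
 * Release the pages holding __init code and data once booting is
 * complete: drop PG_reserved, reset the use count and hand each page
 * back to the allocator.
 */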
void free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
		free_page(addr);
	}
}
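
/*
 * Fill in the sysinfo fields used by sysinfo(2) and /proc/meminfo.
 * The walk over mem_map skips holes via PageSkip and leaves reserved
 * pages out of totalram; counts are converted to bytes at the end.
 */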
void si_meminfo(struct sysinfo *val)
{
	struct page *page, *end;

	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = atomic_read(&buffermem);
	for (page = mem_map, end = mem_map + max_mapnr;
	     page < end; page++) {
		if (PageSkip(page)) {
			if (page->next_hash < page)
				break;
			page = page->next_hash;
		}
		if (PageReserved(page))
			continue;
		val->totalram++;
		if (!atomic_read(&page->count))
			continue;
		val->sharedram += atomic_read(&page->count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}