/* $Id: init.c,v 1.17 2000-04-08 15:38:54+09 gniibe Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache;

/* Page accounting, updated in mem_init()/free_initmem(). */
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;

extern unsigned long init_smp_mappings(unsigned long);
49 * BAD_PAGE is the page that is used for page faults when linux
50 * is out-of-memory. Older versions of linux just did a
51 * do_exit(), but using this instead means there is less risk
52 * for a process dying in kernel mode, possibly leaving an inode
55 * BAD_PAGETABLE is the accompanying page-table: it is initialized
56 * to point to BAD_PAGE entries.
58 * ZERO_PAGE is a special page that is used for zero-initialized
62 unsigned long empty_bad_page
[1024];
63 pte_t empty_bad_pte_table
[PTRS_PER_PTE
];
64 extern unsigned long empty_zero_page
[1024];
66 static pte_t
* get_bad_pte_table(void)
71 v
= pte_mkdirty(mk_pte_phys(__pa(empty_bad_page
), PAGE_SHARED
));
73 for (i
= 0; i
< PAGE_SIZE
/sizeof(pte_t
); i
++)
74 empty_bad_pte_table
[i
] = v
;
76 return empty_bad_pte_table
;
79 void __handle_bad_pmd(pmd_t
*pmd
)
82 set_pmd(pmd
, __pmd(_PAGE_TABLE
+ __pa(get_bad_pte_table())));
85 void __handle_bad_pmd_kernel(pmd_t
*pmd
)
88 set_pmd(pmd
, __pmd(_KERNPG_TABLE
+ __pa(get_bad_pte_table())));
91 pte_t
*get_pte_kernel_slow(pmd_t
*pmd
, unsigned long offset
)
95 pte
= (pte_t
*) __get_free_page(GFP_KERNEL
);
99 set_pmd(pmd
, __pmd(_KERNPG_TABLE
+ __pa(pte
)));
102 set_pmd(pmd
, __pmd(_KERNPG_TABLE
+ __pa(get_bad_pte_table())));
105 free_page((unsigned long)pte
);
107 __handle_bad_pmd_kernel(pmd
);
110 return (pte_t
*) pmd_page(*pmd
) + offset
;
113 pte_t
*get_pte_slow(pmd_t
*pmd
, unsigned long offset
)
117 pte
= (unsigned long) __get_free_page(GFP_KERNEL
);
118 if (pmd_none(*pmd
)) {
120 clear_page((void *)pte
);
121 set_pmd(pmd
, __pmd(_PAGE_TABLE
+ __pa(pte
)));
122 return (pte_t
*)pte
+ offset
;
124 set_pmd(pmd
, __pmd(_PAGE_TABLE
+ __pa(get_bad_pte_table())));
129 __handle_bad_pmd(pmd
);
132 return (pte_t
*) pmd_page(*pmd
) + offset
;
135 int do_check_pgt_cache(int low
, int high
)
138 if (pgtable_cache_size
> high
) {
141 free_pgd_slow(get_pgd_fast()), freed
++;
143 free_pmd_slow(get_pmd_fast()), freed
++;
145 free_pte_slow(get_pte_fast()), freed
++;
146 } while (pgtable_cache_size
> low
);
153 int i
, total
= 0, reserved
= 0;
154 int shared
= 0, cached
= 0;
156 printk("Mem-info:\n");
158 printk("Free swap: %6dkB\n",nr_swap_pages
<<(PAGE_SHIFT
-10));
162 if (PageReserved(mem_map
+i
))
164 else if (PageSwapCache(mem_map
+i
))
166 else if (page_count(mem_map
+i
))
167 shared
+= page_count(mem_map
+i
) - 1;
169 printk("%d pages of RAM\n",total
);
170 printk("%d reserved pages\n",reserved
);
171 printk("%d pages shared\n",shared
);
172 printk("%d pages swap cached\n",cached
);
173 printk("%ld pages in page table cache\n",pgtable_cache_size
);
177 /* References to section boundaries */
179 extern char _text
, _etext
, _edata
, __bss_start
, _end
;
180 extern char __init_begin
, __init_end
;
182 pgd_t swapper_pg_dir
[1024];
184 /* It'd be good if these lines were in the standard header file. */
185 #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
186 #define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
189 * paging_init() sets up the page tables
191 * This routines also unmaps the page at virtual kernel address 0, so
192 * that we can trap those pesky NULL-reference errors in the kernel.
194 void __init
paging_init(void)
199 /* We don't need kernel mapping as hardware support that. */
200 pg_dir
= swapper_pg_dir
;
202 for (i
=0; i
< USER_PTRS_PER_PGD
*2; i
++)
203 pgd_val(pg_dir
[i
]) = 0;
206 ctrl_outl(MMU_CONTROL_INIT
, MMUCR
);
208 /* The manual suggests doing some nops after turning on the MMU */
209 asm volatile("nop;nop;nop;nop;nop;nop;");
211 mmu_context_cache
= MMU_CONTEXT_FIRST_VERSION
;
212 set_asid(mmu_context_cache
& MMU_CONTEXT_ASID_MASK
);
215 unsigned long zones_size
[MAX_NR_ZONES
] = {0, 0, 0};
216 unsigned long max_dma
, low
, start_pfn
;
218 start_pfn
= START_PFN
;
219 max_dma
= virt_to_phys((char *)MAX_DMA_ADDRESS
) >> PAGE_SHIFT
;
223 zones_size
[ZONE_DMA
] = low
- start_pfn
;
225 zones_size
[ZONE_DMA
] = max_dma
- start_pfn
;
226 zones_size
[ZONE_NORMAL
] = low
- max_dma
;
228 free_area_init_node(0, 0, 0, zones_size
, __MEMORY_START
, 0);
232 void __init
mem_init(void)
234 int codesize
, reservedpages
, datasize
, initsize
;
237 max_mapnr
= num_physpages
= MAX_LOW_PFN
- START_PFN
;
238 high_memory
= (void *)__va(MAX_LOW_PFN
* PAGE_SIZE
);
240 /* clear the zero-page */
241 memset(empty_zero_page
, 0, PAGE_SIZE
);
242 flush_page_to_ram(virt_to_page(empty_zero_page
));
244 /* this will put all low memory onto the freelists */
245 totalram_pages
+= free_all_bootmem();
247 for (tmp
= 0; tmp
< num_physpages
; tmp
++)
249 * Only count reserved RAM pages
251 if (PageReserved(mem_map
+tmp
))
253 codesize
= (unsigned long) &_etext
- (unsigned long) &_text
;
254 datasize
= (unsigned long) &_edata
- (unsigned long) &_etext
;
255 initsize
= (unsigned long) &__init_end
- (unsigned long) &__init_begin
;
257 printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
258 (unsigned long) nr_free_pages() << (PAGE_SHIFT
-10),
259 max_mapnr
<< (PAGE_SHIFT
-10),
261 reservedpages
<< (PAGE_SHIFT
-10),
266 void free_initmem(void)
270 addr
= (unsigned long)(&__init_begin
);
271 for (; addr
< (unsigned long)(&__init_end
); addr
+= PAGE_SIZE
) {
272 ClearPageReserved(virt_to_page(addr
));
273 set_page_count(virt_to_page(addr
), 1);
277 printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end
- &__init_begin
) >> 10);
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages that held the initial ramdisk to the page
 * allocator after it has been unpacked.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		set_page_count(virt_to_page(p), 1);
		free_page(p);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
294 void si_meminfo(struct sysinfo
*val
)
296 val
->totalram
= totalram_pages
;
298 val
->freeram
= nr_free_pages();
299 val
->bufferram
= atomic_read(&buffermem_pages
);
300 val
->totalhigh
= totalhigh_pages
;
301 val
->freehigh
= nr_free_highpages();
302 val
->mem_unit
= PAGE_SIZE
;