/*
 * Historical import: Linux 2.3.9pre5 (davej-history tree),
 * arch/alpha/mm/init.c.
 */
/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
7 #include <linux/config.h>
8 #include <linux/signal.h>
9 #include <linux/sched.h>
10 #include <linux/kernel.h>
11 #include <linux/errno.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
16 #include <linux/mm.h>
17 #include <linux/swap.h>
18 #ifdef CONFIG_BLK_DEV_INITRD
19 #include <linux/blk.h>
20 #endif
22 #include <asm/system.h>
23 #include <asm/uaccess.h>
24 #include <asm/pgtable.h>
25 #include <asm/hwrpb.h>
26 #include <asm/dma.h>
28 #define DEBUG_POISON 0
30 extern void die_if_kernel(char *,struct pt_regs *,long);
31 extern void show_net_buffers(void);
33 struct thread_struct original_pcb;
35 #ifndef __SMP__
36 struct pgtable_cache_struct quicklists;
37 #endif
39 void
40 __bad_pmd(pgd_t *pgd)
42 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
43 pgd_set(pgd, BAD_PAGETABLE);
46 void
47 __bad_pte(pmd_t *pmd)
49 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
50 pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
53 pmd_t *
54 get_pmd_slow(pgd_t *pgd, unsigned long offset)
56 pmd_t *pmd;
58 pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
59 if (pgd_none(*pgd)) {
60 if (pmd) {
61 clear_page((unsigned long)pmd);
62 pgd_set(pgd, pmd);
63 return pmd + offset;
65 pgd_set(pgd, BAD_PAGETABLE);
66 return NULL;
68 free_page((unsigned long)pmd);
69 if (pgd_bad(*pgd)) {
70 __bad_pmd(pgd);
71 return NULL;
73 return (pmd_t *) pgd_page(*pgd) + offset;
76 pte_t *
77 get_pte_slow(pmd_t *pmd, unsigned long offset)
79 pte_t *pte;
81 pte = (pte_t *) __get_free_page(GFP_KERNEL);
82 if (pmd_none(*pmd)) {
83 if (pte) {
84 clear_page((unsigned long)pte);
85 pmd_set(pmd, pte);
86 return pte + offset;
88 pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
89 return NULL;
91 free_page((unsigned long)pte);
92 if (pmd_bad(*pmd)) {
93 __bad_pte(pmd);
94 return NULL;
96 return (pte_t *) pmd_page(*pmd) + offset;
99 int do_check_pgt_cache(int low, int high)
101 int freed = 0;
102 if(pgtable_cache_size > high) {
103 do {
104 if(pgd_quicklist)
105 free_pgd_slow(get_pgd_fast()), freed++;
106 if(pmd_quicklist)
107 free_pmd_slow(get_pmd_fast()), freed++;
108 if(pte_quicklist)
109 free_pte_slow(get_pte_fast()), freed++;
110 } while(pgtable_cache_size > low);
112 return freed;
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
128 pmd_t *
129 __bad_pagetable(void)
131 memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
132 return (pmd_t *) EMPTY_PGT;
135 pte_t
136 __bad_page(void)
138 memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
139 return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
142 void
143 show_mem(void)
145 long i,free = 0,total = 0,reserved = 0;
146 long shared = 0, cached = 0;
148 printk("\nMem-info:\n");
149 show_free_areas();
150 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
151 i = max_mapnr;
152 while (i-- > 0) {
153 total++;
154 if (PageReserved(mem_map+i))
155 reserved++;
156 else if (PageSwapCache(mem_map+i))
157 cached++;
158 else if (!atomic_read(&mem_map[i].count))
159 free++;
160 else
161 shared += atomic_read(&mem_map[i].count) - 1;
163 printk("%ld pages of RAM\n",total);
164 printk("%ld free pages\n",free);
165 printk("%ld reserved pages\n",reserved);
166 printk("%ld pages shared\n",shared);
167 printk("%ld pages swap cached\n",cached);
168 printk("%ld pages in page table cache\n",pgtable_cache_size);
169 show_buffers();
170 #ifdef CONFIG_NET
171 show_net_buffers();
172 #endif
175 extern unsigned long free_area_init(unsigned long, unsigned long);
177 static inline struct thread_struct *
178 load_PCB(struct thread_struct * pcb)
180 register unsigned long sp __asm__("$30");
181 pcb->ksp = sp;
182 return __reload_tss(pcb);
/*
 * paging_init() sets up the page tables: in the alpha version this actually
 * unmaps the bootup page table (as we're now in KSEG, so we don't need it).
 */
189 unsigned long
190 paging_init(unsigned long start_mem, unsigned long end_mem)
192 int i;
193 unsigned long newptbr;
194 struct memclust_struct * cluster;
195 struct memdesc_struct * memdesc;
196 struct thread_struct *original_pcb_ptr;
198 /* initialize mem_map[] */
199 start_mem = free_area_init(start_mem, end_mem);
201 /* find free clusters, update mem_map[] accordingly */
202 memdesc = (struct memdesc_struct *)
203 (hwrpb->mddt_offset + (unsigned long) hwrpb);
204 cluster = memdesc->cluster;
205 for (i = memdesc->numclusters ; i > 0; i--, cluster++) {
206 unsigned long pfn, nr;
208 /* Bit 0 is console/PALcode reserved. Bit 1 is
209 non-volatile memory -- we might want to mark
210 this for later */
211 if (cluster->usage & 3)
212 continue;
213 pfn = cluster->start_pfn;
214 nr = cluster->numpages;
216 while (nr--)
217 clear_bit(PG_reserved, &mem_map[pfn++].flags);
220 /* Initialize the kernel's page tables. Linux puts the vptb in
221 the last slot of the L1 page table. */
222 memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE);
223 memset(swapper_pg_dir, 0, PAGE_SIZE);
224 newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
225 pgd_val(swapper_pg_dir[1023]) =
226 (newptbr << 32) | pgprot_val(PAGE_KERNEL);
228 /* Set the vptb. This is often done by the bootloader, but
229 shouldn't be required. */
230 if (hwrpb->vptb != 0xfffffffe00000000) {
231 wrvptptr(0xfffffffe00000000);
232 hwrpb->vptb = 0xfffffffe00000000;
233 hwrpb_update_checksum(hwrpb);
236 /* Also set up the real kernel PCB while we're at it. */
237 init_task.tss.ptbr = newptbr;
238 init_task.tss.pal_flags = 1; /* set FEN, clear everything else */
239 init_task.tss.flags = 0;
240 original_pcb_ptr = load_PCB(&init_task.tss);
241 tbia();
243 /* Save off the contents of the original PCB so that we can
244 restore the original console's page tables for a clean reboot.
246 Note that the PCB is supposed to be a physical address, but
247 since KSEG values also happen to work, folks get confused.
248 Check this here. */
250 if ((unsigned long)original_pcb_ptr < PAGE_OFFSET) {
251 original_pcb_ptr = (struct thread_struct *)
252 phys_to_virt((unsigned long) original_pcb_ptr);
254 original_pcb = *original_pcb_ptr;
256 return start_mem;
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/*
 * Undo paging_init() for a clean return to the SRM console: put the
 * vptb back where SRM expects it and reload the console's saved PCB.
 * Statement order matters here -- each TLB flush must follow the
 * mapping change it invalidates.
 */
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it. */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000);
	hwrpb->vptb = 0x200000000;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use. */
	load_PCB(&original_pcb);
	tbia();
}
#endif
#if DEBUG_POISON
/*
 * Debug aid: fill a page with a recognizable poison pattern before it
 * is freed, so stale references show up as 0xdeadbeef... garbage.
 * Unrolled eight quadwords (64 bytes) per iteration.
 */
static void
kill_page(unsigned long pg)
{
	unsigned long *word = (unsigned long *) pg;
	unsigned long *end = word + PAGE_SIZE / sizeof(unsigned long);
	unsigned long poison = 0xdeadbeefdeadbeef;

	while (word != end) {
		word[0] = poison;
		word[1] = poison;
		word[2] = poison;
		word[3] = poison;
		word[4] = poison;
		word[5] = poison;
		word[6] = poison;
		word[7] = poison;
		word += 8;
	}
}
#else
#define kill_page(pg)
#endif
299 void
300 mem_init(unsigned long start_mem, unsigned long end_mem)
302 unsigned long tmp;
304 end_mem &= PAGE_MASK;
305 max_mapnr = num_physpages = MAP_NR(end_mem);
306 high_memory = (void *) end_mem;
307 start_mem = PAGE_ALIGN(start_mem);
310 * Mark the pages used by the kernel as reserved.
312 tmp = KERNEL_START;
313 while (tmp < start_mem) {
314 set_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
315 tmp += PAGE_SIZE;
318 for (tmp = PAGE_OFFSET ; tmp < end_mem ; tmp += PAGE_SIZE) {
319 if (tmp >= MAX_DMA_ADDRESS)
320 clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
321 if (PageReserved(mem_map+MAP_NR(tmp)))
322 continue;
323 atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
324 #ifdef CONFIG_BLK_DEV_INITRD
325 if (initrd_start && tmp >= initrd_start && tmp < initrd_end)
326 continue;
327 #endif
328 kill_page(tmp);
329 free_page(tmp);
331 tmp = nr_free_pages << PAGE_SHIFT;
332 printk("Memory: %luk available\n", tmp >> 10);
333 return;
336 void
337 free_initmem (void)
339 extern char __init_begin, __init_end;
340 unsigned long addr;
342 addr = (unsigned long)(&__init_begin);
343 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
344 mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
345 atomic_set(&mem_map[MAP_NR(addr)].count, 1);
346 kill_page(addr);
347 free_page(addr);
349 printk ("Freeing unused kernel memory: %ldk freed\n",
350 (&__init_end - &__init_begin) >> 10);
353 void
354 si_meminfo(struct sysinfo *val)
356 int i;
358 i = max_mapnr;
359 val->totalram = 0;
360 val->sharedram = 0;
361 val->freeram = nr_free_pages << PAGE_SHIFT;
362 val->bufferram = buffermem;
363 while (i-- > 0) {
364 if (PageReserved(mem_map+i))
365 continue;
366 val->totalram++;
367 if (!atomic_read(&mem_map[i].count))
368 continue;
369 val->sharedram += atomic_read(&mem_map[i].count) - 1;
371 val->totalram <<= PAGE_SHIFT;
372 val->sharedram <<= PAGE_SHIFT;
373 return;