2 * linux/arch/alpha/mm/init.c
4 * Copyright (C) 1995 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/signal.h>
9 #include <linux/sched.h>
10 #include <linux/kernel.h>
11 #include <linux/errno.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #ifdef CONFIG_BLK_DEV_INITRD
19 #include <linux/blk.h>
22 #include <asm/system.h>
23 #include <asm/uaccess.h>
24 #include <asm/pgtable.h>
25 #include <asm/hwrpb.h>
28 #define DEBUG_POISON 0
30 extern void die_if_kernel(char *,struct pt_regs
*,long);
31 extern void show_net_buffers(void);
/* PCB (process control block) that was active when the kernel took over
   from the console; saved so the console's page tables can be restored
   for a clean reboot (see paging_init / srm_paging_stop below). */
33 struct thread_struct original_pcb
;
/* Per-CPU quicklists caching freed pgd/pmd/pte pages; trimmed by
   do_check_pgt_cache() below. */
36 struct pgtable_cache_struct quicklists
;
/* NOTE(review): the enclosing function headers are missing from this
   extraction; only two bodies are visible.  Each reports a corrupted
   page-table entry and repoints it at BAD_PAGETABLE so the kernel can
   limp along instead of dereferencing garbage. */
42 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd
));
43 pgd_set(pgd
, BAD_PAGETABLE
);
49 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd
));
50 pmd_set(pmd
, (pte_t
*) BAD_PAGETABLE
);
/* Slow path of pmd allocation: allocate a fresh page for a pmd table,
   zero it, install it in *pgd, and return the pmd entry at `offset`
   within the page *pgd points to.
   NOTE(review): several original lines are missing from this extraction
   (opening brace, the `pmd` declaration, the pgd_none/pgd_bad checks and
   the branch structure around them) — TODO confirm against the original
   file before editing. */
54 get_pmd_slow(pgd_t
*pgd
, unsigned long offset
)
58 pmd
= (pmd_t
*) __get_free_page(GFP_KERNEL
);
/* zero the freshly allocated pmd page before installing it */
61 clear_page((unsigned long)pmd
);
/* bad-pgd path: poison the entry and release the now-unneeded page */
65 pgd_set(pgd
, BAD_PAGETABLE
);
68 free_page((unsigned long)pmd
);
73 return (pmd_t
*) pgd_page(*pgd
) + offset
;
/* Slow path of pte allocation: allocate and zero a page for a pte
   table, install it in *pmd, and return the pte entry at `offset`
   within the page *pmd points to.  Mirrors get_pmd_slow above.
   NOTE(review): opening brace, the `pte` declaration and the
   pmd_none/pmd_bad checks are missing from this extraction — TODO
   confirm against the original file. */
77 get_pte_slow(pmd_t
*pmd
, unsigned long offset
)
81 pte
= (pte_t
*) __get_free_page(GFP_KERNEL
);
/* zero the freshly allocated pte page before installing it */
84 clear_page((unsigned long)pte
);
/* bad-pmd path: poison the entry and release the now-unneeded page */
88 pmd_set(pmd
, (pte_t
*) BAD_PAGETABLE
);
91 free_page((unsigned long)pte
);
96 return (pte_t
*) pmd_page(*pmd
) + offset
;
/* Trim the page-table quicklists: when the cache holds more than `high`
   pages, pop pgd/pmd/pte pages off the fast lists and free them until
   the cache drops to `low`.  Presumably returns the number of pages
   freed via `freed` — NOTE(review): the `freed` declaration, the loop
   opening and the return statement are missing from this extraction. */
99 int do_check_pgt_cache(int low
, int high
)
102 if(pgtable_cache_size
> high
) {
105 free_pgd_slow(get_pgd_fast()), freed
++;
107 free_pmd_slow(get_pmd_fast()), freed
++;
109 free_pte_slow(get_pte_fast()), freed
++;
110 } while(pgtable_cache_size
> low
);
116 * BAD_PAGE is the page that is used for page faults when linux
117 * is out-of-memory. Older versions of linux just did a
118 * do_exit(), but using this instead means there is less risk
119 * for a process dying in kernel mode, possibly leaving an inode
122 * BAD_PAGETABLE is the accompanying page-table: it is initialized
123 * to point to BAD_PAGE entries.
125 * ZERO_PAGE is a special page that is used for zero-initialized
/* Return the shared "bad" page table: zero the fixed EMPTY_PGT page and
   hand it back as a pmd table.  Used as the target of corrupted pgd/pmd
   entries (see the helpers above).
   NOTE(review): the return type and braces are missing from this
   extraction. */
129 __bad_pagetable(void)
131 memset((void *) EMPTY_PGT
, 0, PAGE_SIZE
);
132 return (pmd_t
*) EMPTY_PGT
;
/* NOTE(review): the function header (presumably __bad_page, per the
   comment block above) is missing from this extraction.  Zeroes the
   shared EMPTY_PGE page and returns a dirty pte mapping it with
   PAGE_SHARED protection. */
138 memset((void *) EMPTY_PGE
, 0, PAGE_SIZE
);
139 return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE
, PAGE_SHARED
));
/* NOTE(review): the function header (presumably show_mem) and the loop
   header over mem_map[] are missing from this extraction.  Walks
   mem_map[] classifying each page as reserved, swap-cached, free or
   shared, then printks a memory summary. */
145 long i
,free
= 0,total
= 0,reserved
= 0;
146 long shared
= 0, cached
= 0;
148 printk("\nMem-info:\n");
150 printk("Free swap: %6dkB\n",nr_swap_pages
<<(PAGE_SHIFT
-10));
/* classification of mem_map[i] — the counter increments for the
   reserved/cached/free branches are missing from this extraction */
154 if (PageReserved(mem_map
+i
))
156 else if (PageSwapCache(mem_map
+i
))
158 else if (!atomic_read(&mem_map
[i
].count
))
/* count > 1 means the page is shared; subtract the primary user */
161 shared
+= atomic_read(&mem_map
[i
].count
) - 1;
163 printk("%ld pages of RAM\n",total
);
164 printk("%ld free pages\n",free
);
165 printk("%ld reserved pages\n",reserved
);
166 printk("%ld pages shared\n",shared
);
167 printk("%ld pages swap cached\n",cached
);
168 printk("%ld pages in page table cache\n",pgtable_cache_size
);
175 extern unsigned long free_area_init(unsigned long, unsigned long);
/* Switch the CPU to a new PCB via __reload_tss and return the
   previously active one.  Declares the current stack pointer ($30) as
   a register variable — NOTE(review): lines that presumably store it
   into the PCB before the switch are missing from this extraction. */
177 static inline struct thread_struct
*
178 load_PCB(struct thread_struct
* pcb
)
180 register unsigned long sp
__asm__("$30");
182 return __reload_tss(pcb
);
186 * paging_init() sets up the page tables: in the alpha version this actually
187 * unmaps the bootup page table (as we're now in KSEG, so we don't need it).
/* Set up the kernel page tables on Alpha: init mem_map[], un-reserve
   the free memory clusters described by the HWRPB, build swapper_pg_dir
   with the virtual page table base (vptb) in its last slot, point the
   hardware at it, and save the console's original PCB for reboot.
   NOTE(review): many original lines (braces, `i` declaration, cluster
   loop interior, closing statements) are missing from this
   extraction. */
190 paging_init(unsigned long start_mem
, unsigned long end_mem
)
193 unsigned long newptbr
;
194 struct memclust_struct
* cluster
;
195 struct memdesc_struct
* memdesc
;
196 struct thread_struct
*original_pcb_ptr
;
198 /* initialize mem_map[] */
199 start_mem
= free_area_init(start_mem
, end_mem
);
201 /* find free clusters, update mem_map[] accordingly */
202 memdesc
= (struct memdesc_struct
*)
203 (hwrpb
->mddt_offset
+ (unsigned long) hwrpb
);
204 cluster
= memdesc
->cluster
;
205 for (i
= memdesc
->numclusters
; i
> 0; i
--, cluster
++) {
206 unsigned long pfn
, nr
;
208 /* Bit 0 is console/PALcode reserved. Bit 1 is
209 non-volatile memory -- we might want to mark
211 if (cluster
->usage
& 3)
213 pfn
= cluster
->start_pfn
;
214 nr
= cluster
->numpages
;
/* mark each page of a usable cluster as not reserved */
217 clear_bit(PG_reserved
, &mem_map
[pfn
++].flags
);
220 /* Initialize the kernel's page tables. Linux puts the vptb in
221 the last slot of the L1 page table. */
222 memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE
);
223 memset(swapper_pg_dir
, 0, PAGE_SIZE
);
/* physical frame number of swapper_pg_dir becomes the new ptbr */
224 newptbr
= ((unsigned long) swapper_pg_dir
- PAGE_OFFSET
) >> PAGE_SHIFT
;
225 pgd_val(swapper_pg_dir
[1023]) =
226 (newptbr
<< 32) | pgprot_val(PAGE_KERNEL
);
228 /* Set the vptb. This is often done by the bootloader, but
229 shouldn't be required. */
230 if (hwrpb
->vptb
!= 0xfffffffe00000000) {
231 wrvptptr(0xfffffffe00000000);
232 hwrpb
->vptb
= 0xfffffffe00000000;
233 hwrpb_update_checksum(hwrpb
);
236 /* Also set up the real kernel PCB while we're at it. */
237 init_task
.tss
.ptbr
= newptbr
;
238 init_task
.tss
.pal_flags
= 1; /* set FEN, clear everything else */
239 init_task
.tss
.flags
= 0;
240 original_pcb_ptr
= load_PCB(&init_task
.tss
);
243 /* Save off the contents of the original PCB so that we can
244 restore the original console's page tables for a clean reboot.
246 Note that the PCB is supposed to be a physical address, but
247 since KSEG values also happen to work, folks get confused.
250 if ((unsigned long)original_pcb_ptr
< PAGE_OFFSET
) {
/* returned PCB was a physical address: convert to a KSEG pointer */
251 original_pcb_ptr
= (struct thread_struct
*)
252 phys_to_virt((unsigned long) original_pcb_ptr
);
254 original_pcb
= *original_pcb_ptr
;
259 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/* Undo paging_init's takeover before returning control to the SRM
   console: move the vptb entry back to L1 slot 1 / address 0x2'00000000
   and reload the console's saved PCB.  NOTE(review): return type,
   braces and the matching #endif are missing from this extraction. */
261 srm_paging_stop (void)
263 /* Move the vptb back to where the SRM console expects it. */
264 swapper_pg_dir
[1] = swapper_pg_dir
[1023];
266 wrvptptr(0x200000000);
267 hwrpb
->vptb
= 0x200000000;
268 hwrpb_update_checksum(hwrpb
);
270 /* Reload the page tables that the console had in use. */
271 load_PCB(&original_pcb
);
/* DEBUG_POISON helper: fill page `pg` with the 0xdeadbeefdeadbeef
   pattern so use-after-free of freed pages is detectable.
   NOTE(review): the surrounding #if DEBUG_POISON / #else, the fill
   loop and the closing brace are missing from this extraction. */
278 kill_page(unsigned long pg
)
280 unsigned long *p
= (unsigned long *)pg
;
281 unsigned long i
= PAGE_SIZE
, v
= 0xdeadbeefdeadbeef;
/* non-poisoning build: kill_page compiles away to nothing */
296 #define kill_page(pg)
/* Finish memory setup: record max_mapnr/high_memory, reserve the pages
   occupied by the kernel image, give every usable page a reference
   count of 1, clear PG_DMA above MAX_DMA_ADDRESS, and report the
   available memory.  NOTE(review): braces, the `tmp` declaration, the
   loop that frees non-reserved pages and the initrd branch body are
   missing from this extraction. */
300 mem_init(unsigned long start_mem
, unsigned long end_mem
)
304 end_mem
&= PAGE_MASK
;
305 max_mapnr
= num_physpages
= MAP_NR(end_mem
);
306 high_memory
= (void *) end_mem
;
307 start_mem
= PAGE_ALIGN(start_mem
);
310 * Mark the pages used by the kernel as reserved.
313 while (tmp
< start_mem
) {
314 set_bit(PG_reserved
, &mem_map
[MAP_NR(tmp
)].flags
);
318 for (tmp
= PAGE_OFFSET
; tmp
< end_mem
; tmp
+= PAGE_SIZE
) {
/* pages above the ISA DMA limit cannot be used for DMA buffers */
319 if (tmp
>= MAX_DMA_ADDRESS
)
320 clear_bit(PG_DMA
, &mem_map
[MAP_NR(tmp
)].flags
);
321 if (PageReserved(mem_map
+MAP_NR(tmp
)))
323 atomic_set(&mem_map
[MAP_NR(tmp
)].count
, 1);
324 #ifdef CONFIG_BLK_DEV_INITRD
/* keep initrd pages out of the free pool until it is released */
325 if (initrd_start
&& tmp
>= initrd_start
&& tmp
< initrd_end
)
331 tmp
= nr_free_pages
<< PAGE_SHIFT
;
332 printk("Memory: %luk available\n", tmp
>> 10);
/* NOTE(review): the function header (presumably free_initmem) and the
   `addr` declaration are missing from this extraction.  Releases the
   __init section back to the page allocator: un-reserve each page
   between __init_begin and __init_end, reset its count to 1, and
   (in missing lines) free it, then report how much was freed. */
339 extern char __init_begin
, __init_end
;
342 addr
= (unsigned long)(&__init_begin
);
343 for (; addr
< (unsigned long)(&__init_end
); addr
+= PAGE_SIZE
) {
344 mem_map
[MAP_NR(addr
)].flags
&= ~(1 << PG_reserved
);
345 atomic_set(&mem_map
[MAP_NR(addr
)].count
, 1);
349 printk ("Freeing unused kernel memory: %ldk freed\n",
350 (&__init_end
- &__init_begin
) >> 10);
/* Fill in `val` for the sysinfo(2)/meminfo interface: free RAM, buffer
   memory, and a mem_map[] walk tallying total/free/shared pages, with
   the page counts converted to bytes at the end.  NOTE(review): braces,
   the `i` declaration, the loop header and the totalram/freeram
   accumulation lines are missing from this extraction. */
354 si_meminfo(struct sysinfo
*val
)
361 val
->freeram
= nr_free_pages
<< PAGE_SHIFT
;
362 val
->bufferram
= buffermem
;
364 if (PageReserved(mem_map
+i
))
367 if (!atomic_read(&mem_map
[i
].count
))
/* count > 1 means shared; subtract the page's primary user */
369 val
->sharedram
+= atomic_read(&mem_map
[i
].count
) - 1;
/* convert page counts to byte counts for userspace */
371 val
->totalram
<<= PAGE_SHIFT
;
372 val
->sharedram
<<= PAGE_SHIFT
;