/* $Id: init.c,v 1.13 1999/05/01 22:40:40 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1998 by Ralf Baechle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/jazzdma.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/sgialib.h>
#include <asm/mmu_context.h>
extern void show_net_buffers(void);
void __bad_pte_kernel(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
	pmd_val(*pmd) = BAD_PAGETABLE;
}
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_val(*pmd) = BAD_PAGETABLE;
}
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *page;

	page = (pte_t *) __get_free_page(GFP_USER);
	if (pmd_none(*pmd)) {
		if (page) {
			clear_page((unsigned long)page);
			pmd_val(*pmd) = (unsigned long)page;
			return page + offset;
		}
		pmd_val(*pmd) = BAD_PAGETABLE;
		return NULL;
	}
	free_page((unsigned long)page);
	if (pmd_bad(*pmd)) {
		__bad_pte_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *page;

	page = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (page) {
			clear_page((unsigned long)page);
			pmd_val(*pmd) = (unsigned long)page;
			return page + offset;
		}
		pmd_val(*pmd) = BAD_PAGETABLE;
		return NULL;
	}
	free_page((unsigned long)page);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
	/* XXX Just get it working for now... */
	flush_cache_all();
	return 0;
}
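/*
 * Usage sketch (assumed, not from this file): user code reaches this
 * entry point through the cacheflush(2) system call with one of the
 * selectors from <asm/cachectl.h>, e.g. after generating code at runtime:
 *
 *	cacheflush(code, length, BCACHE);
 *
 * With the stub above the addr/bytes/cache arguments are ignored and the
 * caches are flushed in their entirety.
 */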
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
static inline unsigned long setup_zero_pages(void)
{
	unsigned long order, size, pg;

	/* Use eight pages on the cache-aliasing R4000 / R4400 SC and MC
	   parts so that every possible page colour is covered.  */
	switch (mips_cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		order = 3;
		break;
	default:
		order = 0;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	pg = MAP_NR(empty_zero_page);
	while(pg < MAP_NR(empty_zero_page) + (1 << order)) {
		set_bit(PG_reserved, &mem_map[pg].flags);
		atomic_set(&mem_map[pg].count, 0);
		pg++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;
	memset((void *)empty_zero_page, 0, size);

	return size;
}
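/*
 * Minimal sketch of how the variables set up above are meant to be used,
 * assuming the usual ZERO_PAGE() convention from the mips pgtable headers;
 * the helper name below is made up for illustration only.
 */
#if 0	/* illustrative sketch */
static inline unsigned long zero_page_of_colour(unsigned long vaddr)
{
	/* zero_page_mask keeps only the page-colour bits of the address,
	   so the returned zero page has the same virtual-cache colour as
	   the faulting user address. */
	return empty_zero_page + (vaddr & zero_page_mask);
}
#endif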
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if(pgtable_cache_size > high) {
		do {
			if(pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if(pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if(pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while(pgtable_cache_size > low);
	}
	return freed;
}
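/*
 * Usage sketch (watermark names assumed, for illustration only): the VM
 * calls this with a low and a high watermark, e.g.
 *
 *	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
 *
 * so the pgd/pmd/pte quicklists are only trimmed back down to the low
 * mark once pgtable_cache_size has grown past the high mark.
 */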
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory.  Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
	extern char empty_bad_page_table[PAGE_SIZE];
	unsigned long page;
	unsigned long dummy1, dummy2;
#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
	unsigned long dummy3;
#endif

	page = (unsigned long) empty_bad_page_table;
	/*
	 * As long as we only save the low 32 bit of the 64 bit wide
	 * R4000 registers on interrupt we cannot use 64 bit memory accesses
	 * to the main memory.
	 */
#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
	/*
	 * Use 64bit code even for Linux/MIPS 32bit on R4000
	 */
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		".set\tmips3\n\t"
		/* Duplicate the 32 bit pte value into both halves of %2. */
		"dsll32\t$1,%2,0\n\t"
		"dsrl32\t%2,$1,0\n\t"
		"or\t%2,$1\n\t"
		/* Fill the page with BAD_PAGE ptes, eight bytes per store. */
		"1:\tsd\t%2,(%0)\n\t"
		"subu\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,8\n\t"
		".set\tmips0\n\t"
		".set\tat\n\t"
		".set\treorder"
		: "=r" (dummy1), "=r" (dummy2), "=r" (dummy3)
		: "0" (page), "1" (PAGE_SIZE/8),
		  "2" (pte_val(BAD_PAGE)));
#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		/* Fill the page with BAD_PAGE ptes, four bytes per store. */
		"1:\tsw\t%2,(%0)\n\t"
		"subu\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,4\n\t"
		".set\treorder"
		: "=r" (dummy1), "=r" (dummy2)
		: "r" (pte_val(BAD_PAGE)),
		  "0" (page), "1" (PAGE_SIZE/4));
#endif

	return (pte_t *)page;
}
pte_t __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];
	unsigned long page = (unsigned long)empty_bad_page;

	memset((void *)page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(page, PAGE_SHARED));
}
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n",cached);
	printk("%ld pages in page table cache\n",pgtable_cache_size);
	printk("%d free pages\n", free);
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}
extern unsigned long free_area_init(unsigned long, unsigned long);
__initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_mem))
{
	/* Initialize the entire pgd. */
	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir + PAGE_SIZE / 2);
	return free_area_init(start_mem, end_mem);
}
__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
{
	int codepages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int _etext, _ftext;

#ifdef CONFIG_MIPS_JAZZ
	if (mips_machgroup == MACH_GROUP_JAZZ)
		start_mem = vdma_init(start_mem, end_mem);
#endif

	end_mem &= PAGE_MASK;
	max_mapnr = MAP_NR(end_mem);
	high_memory = (void *)end_mem;

	/* mark usable pages in the mem_map[] */
	start_mem = PAGE_ALIGN(start_mem);

	for(tmp = MAP_NR(start_mem);tmp < max_mapnr;tmp++)
		clear_bit(PG_reserved, &mem_map[tmp].flags);

	prom_fixup_mem_map(start_mem, (unsigned long)high_memory);

	for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
		/*
		 * This is only for PC-style DMA.  The onboard DMA
		 * of Jazz and Tyne machines is completely different and
		 * not handled via a flag in mem_map_t.
		 */
		if (tmp >= MAX_DMA_ADDRESS)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
		if (PageReserved(mem_map+MAP_NR(tmp))) {
			if ((tmp < (unsigned long) &_etext) &&
			    (tmp >= (unsigned long) &_ftext))
				codepages++;
			else if ((tmp < start_mem) &&
				 (tmp > (unsigned long) &_etext))
				datapages++;
			continue;
		}
		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start || (tmp < initrd_start || tmp >=
		    initrd_end))
#endif
			free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;

	/* Setup zeroed pages. */
	tmp -= setup_zero_pages();

	printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       max_mapnr << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));
}
extern char __init_begin, __init_end;
void free_initmem(void)
{
	unsigned long addr;

	prom_free_prom_memory ();

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
		free_page(addr);
	}
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
void si_meminfo(struct sysinfo *val)
{
	int i;

	i = MAP_NR(high_memory);
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}
/* Fixup an immediate instruction */
__initfunc(static void __i_insn_fixup(unsigned int **start, unsigned int **stop,
                                      unsigned int i_const))
{
	unsigned int **p, *ip;

	for (p = start;p < stop; p++) {
		ip = *p;
		/* Patch the low 16 bit immediate field of the instruction. */
		*ip = (*ip & 0xffff0000) | i_const;
	}
}
#define i_insn_fixup(section, const)					\
do {									\
	extern unsigned int *__start_ ## section;			\
	extern unsigned int *__stop_ ## section;			\
	__i_insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
} while(0)
/* Caller is assumed to flush the caches before the first context switch. */
__initfunc(void __asid_setup(unsigned int inc, unsigned int mask,
                             unsigned int version_mask,
                             unsigned int first_version))
{
	i_insn_fixup(__asid_inc, inc);
	i_insn_fixup(__asid_mask, mask);
	i_insn_fixup(__asid_version_mask, version_mask);
	i_insn_fixup(__asid_first_version, first_version);

	asid_cache = first_version;
}
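/*
 * Usage sketch with assumed constants: a CPU probe routine would call
 * this once with values describing the ASID field of its EntryHi
 * register, for example on an R4000-class CPU with an 8-bit ASID:
 *
 *	__asid_setup(1, 0xff, 0xff00, 0x100);
 *
 * Each constant has to fit in the 16 bit immediate field that
 * __i_insn_fixup() patches, and asid_cache starts out at the first
 * version so the initial ASIDs are handed out from a fresh generation.
 */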