/* $Id: init.c,v 1.13 1999/05/01 22:40:40 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1998 by Ralf Baechle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#ifdef CONFIG_SGI
#include <asm/sgialib.h>
#endif
#include <asm/mmu_context.h>

extern void show_net_buffers(void);
void __bad_pte_kernel(pmd_t *pmd)
{
        printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
        pmd_val(*pmd) = BAD_PAGETABLE;
}

void __bad_pte(pmd_t *pmd)
{
        printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
        pmd_val(*pmd) = BAD_PAGETABLE;
}
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
{
        pte_t *page;

        page = (pte_t *) __get_free_page(GFP_USER);
        if (pmd_none(*pmd)) {
                if (page) {
                        clear_page((unsigned long)page);
                        pmd_val(*pmd) = (unsigned long)page;
                        return page + offset;
                }
                pmd_val(*pmd) = BAD_PAGETABLE;
                return NULL;
        }
        free_page((unsigned long)page);
        if (pmd_bad(*pmd)) {
                __bad_pte_kernel(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + offset;
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
        pte_t *page;

        page = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pmd_none(*pmd)) {
                if (page) {
                        clear_page((unsigned long)page);
                        pmd_val(*pmd) = (unsigned long)page;
                        return page + offset;
                }
                pmd_val(*pmd) = BAD_PAGETABLE;
                return NULL;
        }
        free_page((unsigned long)page);
        if (pmd_bad(*pmd)) {
                __bad_pte(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + offset;
}
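/*
 * sys_cacheflush() below implements the MIPS cacheflush(2) system call; the
 * cache argument is meant to select ICACHE, DCACHE or BCACHE as defined in
 * <asm/cachectl.h>, but as the XXX note says this stub ignores all three
 * arguments and simply flushes everything.
 */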
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
        /* XXX Just get it working for now... */
        flush_cache_all();
        return 0;
}
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after the initialization
 * we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
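/*
 * setup_zero_pages() below allocates 1 << order contiguous zero pages
 * (order 3, i.e. eight pages, on the SC/MC parts) and derives zero_page_mask
 * from that size; ZERO_PAGE() in <asm/pgtable.h> can then presumably use the
 * mask to hand out the zero page whose virtual colour matches the faulting
 * address.
 */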
static inline unsigned long setup_zero_pages(void)
{
        unsigned long order, size, pg;

        switch (mips_cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                order = 3;
                break;
        default:
                order = 0;
        }

        empty_zero_page = __get_free_pages(GFP_KERNEL, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        pg = MAP_NR(empty_zero_page);
        while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
                set_bit(PG_reserved, &mem_map[pg].flags);
                atomic_set(&mem_map[pg].count, 0);
                pg++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;
        memset((void *)empty_zero_page, 0, size);

        return size;
}
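/*
 * do_check_pgt_cache() below trims the pgd/pmd/pte quicklists, releasing one
 * cached page table page per list per pass until the cache drops back to the
 * "low" watermark; it is presumably called from the idle loop whenever the
 * cache has grown past "high".
 */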
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;

        if (pgtable_cache_size > high) {
                do {
                        if (pgd_quicklist)
                                free_pgd_slow(get_pgd_fast()), freed++;
                        if (pmd_quicklist)
                                free_pmd_slow(get_pmd_fast()), freed++;
                        if (pte_quicklist)
                                free_pte_slow(get_pte_fast()), freed++;
                } while (pgtable_cache_size > low);
        }
        return freed;
}
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory.  Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long page;
        unsigned long dummy1, dummy2;
#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
        unsigned long dummy3;
#endif

        page = (unsigned long) empty_bad_page_table;

        /*
         * As long as we only save the low 32 bit of the 64 bit wide
         * R4000 registers on interrupt we cannot use 64 bit memory accesses
         * to the main memory.
         */
#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
        /*
         * Use 64bit code even for Linux/MIPS 32bit on R4000.  The
         * dsll32/dsrl32/or sequence replicates the 32bit PTE value into
         * both halves of a 64bit register, so each sd fills two pte slots
         * per iteration (PAGE_SIZE/8 stores instead of PAGE_SIZE/4).
         */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                ".set\tnoat\n\t"
                ".set\tmips3\n\t"
                "dsll32\t$1,%2,0\n\t"
                "dsrl32\t%2,$1,0\n\t"
                "or\t%2,$1\n"
                "1:\tsd\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,8\n\t"
                ".set\tmips0\n\t"
                ".set\tat\n"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2),
                 "=r" (dummy3)
                :"0" (page),
                 "1" (PAGE_SIZE/8),
                 "2" (pte_val(BAD_PAGE)));
#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,4\n\t"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"r" (pte_val(BAD_PAGE)),
                 "0" (page),
                 "1" (PAGE_SIZE/4));
#endif

        return (pte_t *)page;
}
pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];
        unsigned long page = (unsigned long)empty_bad_page;

        clear_page(page);
        return pte_mkdirty(mk_pte(page, PAGE_SHARED));
}
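/*
 * The two helpers above back the out-of-memory fallbacks used earlier in
 * this file: __bad_pte() and friends install BAD_PAGETABLE (presumably
 * wrapped around __bad_pagetable() in <asm/pgtable.h>), whose entries all
 * point at the throwaway page returned by __bad_page(), instead of killing
 * the faulting process outright.
 */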
void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!atomic_read(&mem_map[i].count))
                        free++;
                else
                        shared += atomic_read(&mem_map[i].count) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%ld pages in page table cache\n", pgtable_cache_size);
        printk("%d free pages\n", free);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}
extern unsigned long free_area_init(unsigned long, unsigned long);

__initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_mem))
{
        /* Initialize the entire pgd.  */
        pgd_init((unsigned long)swapper_pg_dir);
        pgd_init((unsigned long)swapper_pg_dir + PAGE_SIZE / 2);
        return free_area_init(start_mem, end_mem);
}
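/*
 * pgd_init() appears to initialise half a page worth of pgd entries per
 * call, which is why paging_init() above runs it over both halves of
 * swapper_pg_dir.
 */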
__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
{
        int codepages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext, _ftext;

#ifdef CONFIG_MIPS_JAZZ
        if (mips_machgroup == MACH_GROUP_JAZZ)
                start_mem = vdma_init(start_mem, end_mem);
#endif

        end_mem &= PAGE_MASK;
        max_mapnr = MAP_NR(end_mem);
        high_memory = (void *)end_mem;
        num_physpages = 0;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

        for (tmp = MAP_NR(start_mem); tmp < max_mapnr; tmp++)
                clear_bit(PG_reserved, &mem_map[tmp].flags);

        prom_fixup_mem_map(start_mem, (unsigned long)high_memory);

        for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
                /*
                 * This is only for PC-style DMA.  The onboard DMA
                 * of Jazz and Tyne machines is completely different and
                 * not handled via a flag in mem_map_t.
                 */
                if (tmp >= MAX_DMA_ADDRESS)
                        clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
                if (PageReserved(mem_map+MAP_NR(tmp))) {
                        if ((tmp < (unsigned long) &_etext) &&
                            (tmp >= (unsigned long) &_ftext))
                                codepages++;
                        else if ((tmp < start_mem) &&
                                 (tmp > (unsigned long) &_etext))
                                datapages++;
                        continue;
                }
                num_physpages++;
                atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
                /* Don't hand pages holding the initrd image back to the
                   allocator yet; they are released when the initrd is no
                   longer needed. */
                if (!initrd_start ||
                    (tmp < initrd_start || tmp >= initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;

        /* Setup zeroed pages.  */
        tmp -= setup_zero_pages();

        printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
               tmp >> 10,
               max_mapnr << (PAGE_SHIFT-10),
               codepages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10));
}
extern char __init_begin, __init_end;

void free_initmem(void)
{
        unsigned long addr;

        prom_free_prom_memory ();

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
                atomic_set(&mem_map[MAP_NR(addr)].count, 1);
                free_page(addr);
        }
        printk("Freeing unused kernel memory: %dk freed\n",
               (&__init_end - &__init_begin) >> 10);
}
void si_meminfo(struct sysinfo *val)
{
        int i;

        i = MAP_NR(high_memory);
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (PageReserved(mem_map+i))
                        continue;
                val->totalram++;
                if (!atomic_read(&mem_map[i].count))
                        continue;
                val->sharedram += atomic_read(&mem_map[i].count) - 1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
/* Fixup an immediate instruction  */
__initfunc(static void __i_insn_fixup(unsigned int **start, unsigned int **stop,
                                      unsigned int i_const))
{
        unsigned int **p, *ip;

        for (p = start; p < stop; p++) {
                ip = *p;
                *ip = (*ip & 0xffff0000) | i_const;
        }
}

#define i_insn_fixup(section, const)                                    \
do {                                                                    \
        extern unsigned int *__start_ ## section;                       \
        extern unsigned int *__stop_ ## section;                        \
        __i_insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
} while(0)
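/*
 * The __start_/__stop_ section symbols used by i_insn_fixup() are presumably
 * emitted by the assembly in <asm/mmu_context.h>: each section collects
 * pointers to instructions whose low 16 bits (the immediate field) are
 * rewritten at boot, so the ASID handling fast paths get the CPU-specific
 * constants patched directly into their code instead of loading them from
 * memory.
 */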
/* Caller is assumed to flush the caches before the first context switch.  */
__initfunc(void __asid_setup(unsigned int inc, unsigned int mask,
                             unsigned int version_mask,
                             unsigned int first_version))
{
        i_insn_fixup(__asid_inc, inc);
        i_insn_fixup(__asid_mask, mask);
        i_insn_fixup(__asid_version_mask, version_mask);
        i_insn_fixup(__asid_first_version, first_version);

        asid_cache = first_version;
}
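/*
 * The parameters depend on how wide the ASID field in EntryHi is: R4x00-class
 * CPUs keep an 8 bit ASID in the low bits of EntryHi while the R3000 family
 * uses a 6 bit field at bits 11:6, so the CPU probe code is expected to pass
 * the matching increment, mask and version constants.  A purely illustrative
 * call (values not taken from the actual callers) might look like:
 *
 *      __asid_setup(1, 0xff, 0xff00, 0x100);
 */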