/*
 * linux/arch/m68k/mm/init.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif

#undef DEBUG

extern void die_if_kernel(char *, struct pt_regs *, long);
extern void show_net_buffers(void);
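
/*
 * Trim the pmd/pte quicklists: once the page-table cache has grown
 * past 'high', free entries until it is back down to 'low'.  Returns
 * the number of pages freed.
 */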
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pmd_quicklist)
				freed += free_pmd_slow(get_pmd_fast());
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

pte_t *__bad_pagetable(void)
{
	memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
	return (pte_t *)empty_bad_page_table;
}

static unsigned long empty_bad_page;

pte_t __bad_page(void)
{
	memset((void *)empty_bad_page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
}

unsigned long empty_zero_page;
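
/*
 * Print a summary of memory usage to the console: totals of free,
 * reserved, shared and swap-cached pages plus the page-table cache.
 */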
void show_mem(void)
{
	unsigned long i;
	int free = 0, total = 0, reserved = 0, nonshared = 0, shared = 0;
	int cached = 0;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else if (atomic_read(&mem_map[i].count) == 1)
			nonshared++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages nonshared\n", nonshared);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits = 0;
#endif
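
/*
 * Allocate one page from *memavailp for use as a kernel page table.
 * The page is cleared, pushed out of the cache and TLB, and marked
 * non-cachable so that hardware table walks see consistent entries.
 */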
static pte_t *__init kernel_page_table(unsigned long *memavailp)
{
	pte_t *ptablep;

	ptablep = (pte_t *)*memavailp;
	*memavailp += PAGE_SIZE;

	clear_page((unsigned long)ptablep);
	flush_page_to_ram((unsigned long)ptablep);
	flush_tlb_kernel_page((unsigned long)ptablep);
	nocache_page((unsigned long)ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
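
/*
 * Allocate a pointer (pmd) table for the kernel mapping.  The first
 * call continues in the page holding the highest pointer table already
 * referenced from kernel_pg_dir (set up early in head.S); when that
 * page is used up, a fresh page is taken from *memavailp, cleared and
 * marked non-cachable.
 */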
static pmd_t *__init kernel_ptr_table(unsigned long *memavailp)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_val(kernel_pg_dir[i]))
				continue;
			pmd = pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)*memavailp;
		*memavailp += PAGE_SIZE;

		clear_page((unsigned long)last_pgtable);
		flush_page_to_ram((unsigned long)last_pgtable);
		flush_tlb_kernel_page((unsigned long)last_pgtable);
		nocache_page((unsigned long)last_pgtable);
	} else
		last_pgtable += PTRS_PER_PMD;

	return last_pgtable;
}
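
/*
 * Map a chunk of physical memory into the kernel's virtual address
 * space.  virtaddr is static, so successive chunks end up at
 * consecutive virtual addresses.  On the 020/030 early-termination
 * descriptors are used where possible: a single root-table entry
 * covers a 32MB (ROOTTREESIZE) block and a single pointer-table entry
 * covers 256KB (PTRTREESIZE, i.e. 64 4KB pages, which is why the
 * zero-map case below fills exactly 64 ptes).  On the 040/060 the
 * chunk is mapped page by page through regular page tables.
 */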
static unsigned long __init
map_chunk (unsigned long addr, long size, unsigned long *memavailp)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	static unsigned long virtaddr = 0;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table(memavailp);
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = (pte_t *)kernel_ptr_table(memavailp);
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table(memavailp);
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif

	return virtaddr;
}

extern unsigned long free_area_init(unsigned long, unsigned long);
extern void init_pointer_table(unsigned long ptable);

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
unsigned long __init paging_init(unsigned long start_mem,
				 unsigned long end_mem)
{
	int chunk;
	unsigned long mem_avail = 0;

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
			kernel_pg_dir, availmem, start_mem, end_mem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}
	/* Fix the PAGE_NONE value. */
	if (CPU_IS_040_OR_060) {
		/* On the 680[46]0 we can use the _PAGE_SUPER bit. */
		pgprot_val(protection_map[0]) |= _PAGE_SUPER;
		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_SUPER;
	} else {
		/* Otherwise we must fake it. */
		pgprot_val(protection_map[0]) &= ~_PAGE_PRESENT;
		pgprot_val(protection_map[0]) |= _PAGE_FAKE_SUPER;
		pgprot_val(protection_map[VM_SHARED]) &= ~_PAGE_PRESENT;
		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_FAKE_SUPER;
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  It may allocate some memory for page
	 * tables and thus modify availmem.
	 */
	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
		mem_avail = map_chunk (m68k_memory[chunk].addr,
				       m68k_memory[chunk].size, &start_mem);
	}

	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_bad_page_table = start_mem;
	start_mem += PAGE_SIZE;
	empty_bad_page = start_mem;
	start_mem += PAGE_SIZE;
	empty_zero_page = start_mem;
	start_mem += PAGE_SIZE;
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * allocate the "swapper" page directory and
	 * record in task 0 (swapper) tss
	 */
	init_mm.pgd = (pgd_t *)kernel_ptr_table(&start_mem);
	memset (init_mm.pgd, 0, sizeof(pgd_t)*PTRS_PER_PGD);

	/* setup CPU root pointer for swapper task */
	task[0]->tss.crp[0] = 0x80000000 | _PAGE_TABLE;
	task[0]->tss.crp[1] = virt_to_phys(init_mm.pgd);

#ifdef DEBUG
	printk ("task 0 pagedir at %p virt, %#lx phys\n",
		swapper_pg_dir, task[0]->tss.crp[1]);
#endif
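
	/*
	 * Load the new root table into the MMU: the 68040/060 take the
	 * physical address directly in the user root pointer register
	 * (movec to %urp), while on the 68020/030 the 64-bit CRP
	 * descriptor (type word plus table address) is loaded with pmove.
	 */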
	if (CPU_IS_040_OR_060)
		asm __volatile__ (".chip 68040\n\t"
				  "movec %0,%%urp\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "r" (task[0]->tss.crp[1]));
	else
		asm __volatile__ (".chip 68030\n\t"
				  "pmove %0,%%crp\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (task[0]->tss.crp[0]));
#ifdef DEBUG
	printk ("set crp\n");
#endif

	/*
	 * Set up SFC/DFC registers (user data space)
	 */
	set_fs (USER_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	return PAGE_ALIGN(free_area_init(start_mem, end_mem));
}
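
/*
 * mem_init() - hand the usable pages over to the page allocator.
 * Pages outside the kernel image (and outside the initrd, if any) get
 * their reserved bit cleared and are freed; pages that stay reserved
 * are only classified as code, data or init for the boot-time summary.
 */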
void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	unsigned long tmp;
	int i;

	end_mem &= PAGE_MASK;
	high_memory = (void *) end_mem;
	max_mapnr = num_physpages = MAP_NR(end_mem);

	tmp = start_mem = PAGE_ALIGN(start_mem);
	while (tmp < end_mem) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
		tmp += PAGE_SIZE;
	}

#ifdef CONFIG_ATARI
	if (MACH_IS_ATARI)
		atari_stram_reserve_pages( start_mem );
#endif

	for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
		if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
		if (PageReserved(mem_map+MAP_NR(tmp))) {
			if (tmp >= (unsigned long)&_text
			    && tmp < (unsigned long)&_edata) {
				if (tmp < (unsigned long) &_etext)
					codepages++;
				else
					datapages++;
			} else if (tmp >= (unsigned long) &__init_begin
				   && tmp < (unsigned long) &__init_end)
				initpages++;
			else
				datapages++;
			continue;
		}
		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    (tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
#endif
			free_page(tmp);
	}

	/* insert pointer tables allocated so far into the tablelist */
	init_pointer_table((unsigned long)kernel_pg_dir);
	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (pgd_val(kernel_pg_dir[i]))
			init_pointer_table(pgd_page(kernel_pg_dir[i]));
	}

	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
	       (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10));
}
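
/*
 * Free the memory occupied by the __init sections once booting is
 * done, returning those pages to the page allocator.
 */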
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
		free_page(addr);
	}
}
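
/*
 * Fill in the memory statistics reported by sysinfo(): total, free,
 * shared and buffer RAM, all in bytes.
 */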
void si_meminfo(struct sysinfo *val)
{
	unsigned long i;

	i = max_mapnr;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
}