/* $Id: init.c,v 1.161 2000/12/09 20:16:58 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/blk.h>
#include <linux/swap.h>
#include <linux/swapctl.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/vaddrs.h>
#include <asm/dma.h>
#include <asm/starfire.h>

extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;

/* get_new_mmu_context() uses "cache + 1". */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

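/* mmu_context_bmap has one bit per hardware context number: CTX_VERSION_SHIFT
 * bits worth of contexts, packed 64 to an unsigned long, hence the "- 6" in
 * CTX_BMAP_SLOTS above.
 */
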
/* References to section boundaries */
extern char __init_begin, __init_end, _start, _end, etext, edata;

/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

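/* do_check_pgt_cache() trims the page table quicklists back down to "low"
 * entries once they have grown past "high".  On UP it also prunes the pgd
 * cache: a page whose pprev_hash mask reads 3 has both of its pgd halves
 * sitting unused on the free list, so the whole page can go back to the
 * page allocator (dropping the cache count by two).
 */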
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if(pgtable_cache_size > high) {
		do {
#ifdef CONFIG_SMP
			if(pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
#endif
			if(pte_quicklist[0])
				free_pte_slow(get_pte_fast(0)), freed++;
			if(pte_quicklist[1])
				free_pte_slow(get_pte_fast(1)), freed++;
		} while(pgtable_cache_size > low);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > high / 4) {
		struct page *page, *page2;
		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->pprev_hash == 3) {
				if (page2)
					page2->next_hash = page->next_hash;
				else
					(struct page *)pgd_quicklist = page->next_hash;
				page->next_hash = NULL;
				page->pprev_hash = NULL;
				pgd_cache_size -= 2;
				__free_page(page);
				freed++;
				if (page2)
					page = page2->next_hash;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= low / 4)
					break;
				continue;
			}
			page2 = page;
			page = page->next_hash;
		}
	}
#endif
	return freed;
}

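/* update_mmu_cache() flushes a page out of the D-cache the first time a
 * deferred-dirty page (PG_dcache_dirty) gets mapped, then defers to the
 * assembler helper in ultra.S.  flush_icache_range() walks the range one
 * page at a time, flushing by physical address.
 */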
extern void __update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page = pte_page(pte);

	if (VALID_PAGE(page) && page->mapping &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		__flush_dcache_page(page->virtual, 1);
		clear_bit(PG_dcache_dirty, &page->flags);
	}
	__update_mmu_cache(vma, address, pte);
}

/* In arch/sparc64/mm/ultra.S */
extern void __flush_icache_page(unsigned long);

void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long kaddr;

	for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
		__flush_icache_page(__get_phys(kaddr));
}

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t __bad_page(void)
{
	memset((void *) &empty_bad_page, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte_phys((((unsigned long) &empty_bad_page)
					- ((unsigned long)&empty_zero_page)
					+ phys_base),
				       PAGE_SHARED));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
	printk("%d entries in page dir cache\n",pgd_cache_size);
#endif
	show_buffers();
}

int mmu_info(char *buf)
{
	/* We'll do the rest later to make it nice... -DaveM */
#if 0
	if (this_is_cheetah)
		sprintf(buf, "MMU Type\t: One bad ass cpu\n");
	else
#endif
	return sprintf(buf, "MMU Type\t: Spitfire\n");
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

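/* inherit_prom_mappings() copies OBP's "translations" property into the
 * kernel page tables (only the firmware range at 0xf0000000-0xffffffff),
 * remaps the firmware's own view of the kernel via prom_remap(), relocks
 * the kernel TTE through OBP as entry 63, and finally asks the PROM to
 * unmap any of its translations that alias the kernel image.
 */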
static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, 0UL);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= 0xf0000000 && trans[i].virt < 0x100000000) {
			for (vaddr = trans[i].virt;
			     vaddr < trans[i].virt + trans[i].size;
			     vaddr += PAGE_SIZE) {
				pgdp = pgd_offset(&init_mm, vaddr);
				if (pgd_none(*pgdp)) {
					pmdp = __alloc_bootmem(PMD_TABLE_SIZE,
							       PMD_TABLE_SIZE,
							       0UL);
					if (pmdp == NULL)
						early_pgtable_allocfail("pmd");
					memset(pmdp, 0, PMD_TABLE_SIZE);
					pgd_set(pgdp, pmdp);
				}
				pmdp = pmd_offset(pgdp, vaddr);
				if (pmd_none(*pmdp)) {
					ptep = __alloc_bootmem(PTE_TABLE_SIZE,
							       PTE_TABLE_SIZE,
							       0UL);
					if (ptep == NULL)
						early_pgtable_allocfail("pte");
					memset(ptep, 0, PTE_TABLE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = pte_offset(pmdp, vaddr);
				set_pte (ptep, __pte(trans[i].data | _PAGE_MODIFIED));
				trans[i].data += PAGE_SIZE;
			}
		}
	}

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	phys_page = spitfire_get_dtlb_data(63) & _PAGE_PADDR;
	phys_page += ((unsigned long)&prom_boot_page -
		      (unsigned long)&empty_zero_page);

	/* Lock this into i/d tlb entry 59 */
	__asm__ __volatile__(
		"stxa %%g0, [%2] %3\n\t"
		"stxa %0, [%1] %4\n\t"
		"membar #Sync\n\t"
		"flush %%g6\n\t"
		"stxa %%g0, [%2] %5\n\t"
		"stxa %0, [%1] %6\n\t"
		"membar #Sync\n\t"
		"flush %%g6"
		: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
			 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
		    "r" (59 << 3), "r" (TLB_TAG_ACCESS),
		    "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
		    "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
		: "memory");

	tte_vaddr = (unsigned long) &empty_zero_page;

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	kern_locked_tte_data = tte_data = spitfire_get_dtlb_data(63);

	remap_func = (void *) ((unsigned long) &prom_remap -
			       (unsigned long) &prom_boot_page);

	/* Spitfire Errata #32 workaround */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	remap_func(spitfire_get_dtlb_data(63) & _PAGE_PADDR,
		   (unsigned long) &empty_zero_page,
		   prom_get_mmu_ihandle());

	/* Flush out that temporary mapping. */
	spitfire_flush_dtlb_nucleus_page(0x0);
	spitfire_flush_itlb_nucleus_page(0x0);

	/* Now lock us back into the TLBs via OBP. */
	prom_dtlb_load(63, tte_data, tte_vaddr);
	prom_itlb_load(63, tte_data, tte_vaddr);

	/* Re-read translations property. */
	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	for (i = 0; i < n; i++) {
		unsigned long vaddr = trans[i].virt;
		unsigned long size = trans[i].size;

		if (vaddr < 0xf0000000UL) {
			unsigned long avoid_start = (unsigned long) &empty_zero_page;
			unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);

			if (vaddr < avoid_start) {
				unsigned long top = vaddr + size;

				if (top > avoid_start)
					top = avoid_start;
				prom_unmap(top - vaddr, vaddr);
			}
			if ((vaddr + size) > avoid_end) {
				unsigned long bottom = vaddr;

				if (bottom < avoid_end)
					bottom = avoid_end;
				prom_unmap((vaddr + size) - bottom, bottom);
			}
		}
	}

	prom_printf("done.\n");

	register_prom_callbacks();
}

/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	for(i = 0; i < 63; i++) {
		unsigned long tag;

		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "flush %%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		tag = spitfire_get_dtlb_tag(i);
		if(((tag & ~(PAGE_MASK)) == 0) &&
		   ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : /* no outputs */
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(i, 0x0UL);
			membar("#Sync");
		}
	}
}

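/* prom_world() switches the MMU between the kernel and firmware views: on
 * entry to the PROM it kicks out the nucleus VPTE entries and re-installs
 * the firmware's saved locked TLB entries; on exit it clears them again.
 * The saved entries come from inherit_locked_prom_mappings() below.
 */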
static int prom_ditlb_set = 0;
struct prom_tlb_entry {
	int tlb_ent;
	unsigned long tlb_tag;
	unsigned long tlb_data;
};
struct prom_tlb_entry prom_itlb[8], prom_dtlb[8];

void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs(current->thread.current_ds);

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 8; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					    "i" (ASI_DMMU));
				membar("#Sync");
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
				membar("#Sync");
			}

			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2"
					: : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					    "i" (ASI_IMMU));
				membar("#Sync");
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
				membar("#Sync");
			}
		}
	} else {
		for (i = 0; i < 8; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				membar("#Sync");
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				membar("#Sync");
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				membar("#Sync");
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				membar("#Sync");
			}
		}
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}

void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrevocable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		for(i = 0; i < 8; i++) {
			prom_dtlb[i].tlb_ent = -1;
			prom_itlb[i].tlb_ent = -1;
		}
	}
	for(i = 0; i < 63; i++) {
		unsigned long data;

		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "flush %%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		data = spitfire_get_dtlb_data(i);
		if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_dtlb_tag(i);
			if(save_p) {
				prom_dtlb[dtlb_seen].tlb_ent = i;
				prom_dtlb[dtlb_seen].tlb_tag = tag;
				prom_dtlb[dtlb_seen].tlb_data = data;
			}
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(i, 0x0UL);
			membar("#Sync");

			dtlb_seen++;
			if(dtlb_seen > 7)
				break;
		}
	}
	for(i = 0; i < 63; i++) {
		unsigned long data;

		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "flush %%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		data = spitfire_get_itlb_data(i);
		if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_itlb_tag(i);
			if(save_p) {
				prom_itlb[itlb_seen].tlb_ent = i;
				prom_itlb[itlb_seen].tlb_tag = tag;
				prom_itlb[itlb_seen].tlb_data = data;
			}
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
			membar("#Sync");
			spitfire_put_itlb_data(i, 0x0UL);
			membar("#Sync");

			itlb_seen++;
			if(itlb_seen > 7)
				break;
		}
	}
	if (save_p)
		prom_ditlb_set = 1;
}

/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				    "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
					       prom_dtlb[i].tlb_data);
			membar("#Sync");
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2"
				: : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				    "i" (ASI_IMMU));
			membar("#Sync");
			spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
					       prom_itlb[i].tlb_data);
			membar("#Sync");
		}
	}
}

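/* __flush_dcache_range() below writes zero D-cache tags; the 0x3fe0 index
 * mask and the 512-line cap match Spitfire's 16K direct-mapped, 32-byte
 * line D-cache.  __flush_cache_all() clears 16K worth of I-cache tags the
 * same way after flushing the register windows.
 */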
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;
	int n = 0;

	for (va = start; va < end; va += 32) {
		spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
		if (++n >= 512)
			break;
	}
}

void __flush_cache_all(void)
{
	unsigned long va;

	flushw_all();
	for(va = 0; va < (PAGE_SIZE << 1); va += 32)
		spitfire_put_icache_tag(va, 0x0);
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	for(i = 0; i < 64; i++) {
		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "flush %%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		if(!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : /* no outputs */
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
			membar("#Sync");
			spitfire_put_dtlb_data(i, 0x0UL);
			membar("#Sync");
		}

		/* Spitfire Errata #32 workaround */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "flush %%g6"
				     : /* No outputs */
				     : "r" (0),
				       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

		if(!(spitfire_get_itlb_data(i) & _PAGE_L)) {
			__asm__ __volatile__("stxa %%g0, [%0] %1"
					     : /* no outputs */
					     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
			membar("#Sync");
			spitfire_put_itlb_data(i, 0x0UL);
			membar("#Sync");
		}
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}

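/* Context numbers carry a version in the bits at and above CTX_VERSION_SHIFT
 * and the hardware context number below it.  When every hardware context in
 * the current version has been handed out, get_new_mmu_context() bumps the
 * version and resets the bitmap, keeping only contexts 0 and 1 marked busy
 * (hence mmu_context_bmap[0] = 3).
 */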
/* Caller does TLB context flushing on local CPU if necessary.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;

	spin_lock(&ctx_alloc_lock);
	ctx = CTX_HWBITS(tlb_context_cache + 1);
	if (ctx == 0)
		ctx = 1;
	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_HWBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
	if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for(i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	spin_unlock(&ctx_alloc_lock);

	mm->context = new_ctx;
}

#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* For PMDs we don't care about the color, writes are
 * only done via Dcache which is write-thru, so non-Dcache
 * reads will always see correct data.
 */
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
	if(pmd) {
		memset(pmd, 0, PAGE_SIZE);
		pgd_set(pgd, pmd);
		return pmd + offset;
	}
	return NULL;
}

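/* get_pte_slow() allocates an order-1 (two page) block so it can hand back
 * a pte page of the D-cache color the caller asked for; the page of the
 * opposite color is threaded onto that color's quicklist instead of being
 * freed.
 */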
/* OK, we have to color these pages because during DTLB
 * protection faults we set the dirty bit via a non-Dcache
 * enabled mapping in the VPTE area.  The kernel can end
 * up missing the dirty bit resulting in processes crashing
 * _iff_ the VPTE mapping of the ptes have a virtual address
 * bit 13 which is different from bit 13 of the physical address.
 *
 * The sequence is:
 *	1) DTLB protection fault, write dirty bit into pte via VPTE
 *	   mappings.
 *	2) Swapper checks pte, does not see dirty bit, frees page.
 *	3) Process faults back in the page, the old pre-dirtied copy
 *	   is provided and here is the corruption.
 */
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset, unsigned long color)
{
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

		set_page_count((page + 1), 1);
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << 1));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

		/* Now free the other one up, adjust cache size. */
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;

		pmd_set(pmd, pte);
		return pte + offset;
	}
	return NULL;
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	printk ("Contents of itlb: ");
	for (slot = 0; slot < 14; slot++) printk (" ");
	printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
	for (slot = 1; slot < 64; slot+=3) {
		printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
			slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
			slot+1, spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
			slot+2, spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	printk ("Contents of dtlb: ");
	for (slot = 0; slot < 14; slot++) printk (" ");
	printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0),
		spitfire_get_dtlb_data(0));
	for (slot = 1; slot < 64; slot+=3) {
		printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
			slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
			slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
			slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
	}
}

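/* bootmem_init() sizes available memory from sp_banks[] (honouring a mem=
 * limit via cmdline_memory_size), places the bootmem bitmap just above the
 * kernel image (or above the initrd when that is where the kernel ends),
 * registers each bank with the boot-time allocator, and then reserves the
 * kernel image, the initrd and the bootmap itself.
 */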
extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN((unsigned long) &_end) -
		((unsigned long) &empty_zero_page);

	/* Adjust up to the physical address where the kernel begins. */
	start_pfn += phys_base;

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image) {
		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
			sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, phys_base>>PAGE_SHIFT, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		free_bootmem(sp_banks[i].base_addr,
			     sp_banks[i].num_bytes);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	reserve_bootmem(phys_base, size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}

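/* paging_init() below wires the kernel image into locked 4MB DTLB entries
 * (slot 61, plus slot 60 when the image ends past KERNBASE + 0x340000),
 * shifts init_mm.pgd to the upper alias so physical address arithmetic
 * works, patches the VPTE miss handler with the kernel pgd, and only then
 * brings up bootmem, the OBP mappings and the zone lists.
 */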
/* paging_init() sets up the page tables */

extern void sun_serial_setup(void);

static unsigned long last_valid_pfn;

void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	extern unsigned int sparc64_vpte_patchme1[1];
	extern unsigned int sparc64_vpte_patchme2[1];
	unsigned long alias_base = phys_base + PAGE_OFFSET;
	unsigned long second_alias_page = 0;
	unsigned long pt, flags, end_pfn, pages_avail;
	unsigned long shift = alias_base - ((unsigned long)&empty_zero_page);

	set_bit(0, mmu_context_bmap);
	/* We assume physical memory starts at some 4mb multiple,
	 * if this were not true we wouldn't boot up to this point
	 * anyways.
	 */
	pt = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
	__save_and_cli(flags);
	__asm__ __volatile__("
	stxa %1, [%0] %3
	stxa %2, [%5] %4
	membar #Sync
	flush %%g6
	nop
	nop
	nop"
	: /* No outputs */
	: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
	  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
	: "memory");
	if (((unsigned long)&_end) >= KERNBASE + 0x340000) {
		second_alias_page = alias_base + 0x400000;
		__asm__ __volatile__("
		stxa %1, [%0] %3
		stxa %2, [%5] %4
		membar #Sync
		flush %%g6
		nop
		nop
		nop"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
		: "memory");
	}
	__restore_flags(flags);

	/* Now set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));

	sparc64_vpte_patchme1[0] |= (pgd_val(init_mm.pgd[0]) >> 10);
	sparc64_vpte_patchme2[0] |= (pgd_val(init_mm.pgd[0]) & 0x3ff);
	flushi((long)&sparc64_vpte_patchme1[0]);

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_SUN_SERIAL
	/* This does not logically belong here, but we need to
	 * call it at the moment we are able to use the bootmem
	 * allocator.
	 */
	sun_serial_setup();
#endif

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	/* We only created DTLB mapping of this stuff. */
	spitfire_flush_dtlb_nucleus_page(alias_base);
	if (second_alias_page)
		spitfire_flush_dtlb_nucleus_page(second_alias_page);

	flush_tlb_all();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - (phys_base >> PAGE_SHIFT);
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, NULL, NULL, zones_size,
				    phys_base, zholes_size);
	}

	device_scan();
}

/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}

void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}

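/* taint_real_pages() snapshots sp_banks[], asks the PROM for a fresh
 * "available" list via rescan_sp_banks(), then walks the old banks page by
 * page: pages still present get their 4MB chunk marked in
 * sparc64_valid_addr_bitmap, pages the firmware has since claimed are
 * reserved in bootmem so they are never handed out.
 */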
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

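/* mem_init() allocates sparc64_valid_addr_bitmap with one bit per 4MB of
 * physical address space (hence the ">> 22", and the extra ">> 6" to count
 * 64-bit words), marks the kernel image valid, lets taint_real_pages() fill
 * in the rest, then releases bootmem and prints the memory banner.
 */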
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, 0UL);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + phys_base;
	last = PAGE_ALIGN((unsigned long)&_end) -
		((unsigned long) &empty_zero_page);
	last += PAGE_OFFSET + phys_base;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	num_physpages = free_all_bootmem();
	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

#ifndef CONFIG_SMP
	{
		/* Put empty_pg_dir on pgd_quicklist */
		extern pgd_t empty_pg_dir[1024];
		unsigned long addr = (unsigned long)empty_pg_dir;
		unsigned long alias_base = phys_base + PAGE_OFFSET -
			(long)(&empty_zero_page);

		memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
		addr += alias_base;
		free_pgd_fast((pgd_t *)addr);
		num_physpages++;
	}
#endif

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
}

void free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(phys_base)) -
			((unsigned long) &empty_zero_page));
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
	}
}
#endif

void si_meminfo(struct sysinfo *val)
{
	val->totalram = num_physpages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);

	/* These are always zero on Sparc64. */
	val->totalhigh = 0;
	val->freehigh = 0;

	val->mem_unit = PAGE_SIZE;
}