/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

static int num_contexts;
static char *srmmu_name;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
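
/*
 * A note on the FLUSH_BEGIN/FLUSH_END pair: on UP builds it expands to
 * "if ((mm)->context != NO_CONTEXT) { ... }", so per-mm flushes are
 * skipped for address spaces that never received a hardware context;
 * on SMP it expands to nothing and the flush always runs.  Typical use,
 * as in the TurboSparc routines further down:
 *
 *	FLUSH_BEGIN(mm)
 *	srmmu_flush_whole_tlb();
 *	FLUSH_END
 */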

int flush_page_for_dma_global = 1;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
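
/*
 * Worked example (assuming the usual sparc32 PAGE_SHIFT of 12): the
 * shift is then 8, so one bitmap bit stands for 1 << 8 == 256 bytes of
 * nocache pool, which is exactly 64 four-byte hardware PTEs -- one
 * full SRMMU PTE table, as the comment above says.
 */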

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
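
/*
 * Illustration (not in the original source): a PTD holds its target
 * table's physical address shifted right by 4, so a PTE table at
 * physical 0x00fe1000 is stored as 0x000fe100, with the SRMMU_ET_PTD
 * entry type in the low bits.  That is why pmd_set() above and
 * pmd_populate() below step by (SRMMU_REAL_PTRS_PER_PTE *
 * sizeof(pte_t)) >> 4 when filling the 16 hardware PTE-table pointers
 * that make up one software pmd.
 */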

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

/* Find an entry in the third-level page table. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
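
/*
 * Typical use, as seen in srmmu_paging_init() below: the context table
 * must be physically aligned to its own size, so it is allocated with
 *
 *	srmmu_context_table = (ctxd_t *)
 *		__srmmu_get_nocache(num_contexts * sizeof(ctxd_t),
 *				    num_contexts * sizeof(ctxd_t));
 *
 * SRMMU_NOCACHE_ALIGN_MAX above exists precisely to bound that case.
 */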

unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
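
/*
 * Worked example (assuming SRMMU_NOCACHE_ALCRATIO == 64, i.e. 256
 * pages of nocache per 64 MB of system RAM): on a 64 MB machine
 * sysmemavail is 65536, so 65536 / 64 / 1024 * 256 == 256 pages -- a
 * 1 MB nocache pool -- before the MIN/MAX clamps above are applied.
 */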

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
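
/*
 * A note on __nocache_fix(): while the loop above is still building
 * the nocache mapping, the nocache virtual range cannot be
 * dereferenced directly, so pointers into it are detoured through the
 * kernel's linear mapping of the pool (essentially
 * __va(__nocache_pa(p))).  Once srmmu_nocache_init() has run, plain
 * nocache virtual addresses are used instead.
 */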

pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}
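
/*
 * Context allocation: hand out a free hardware context if one is left;
 * otherwise steal the least recently used one (skipping old_mm's own),
 * flushing the victim's cache and TLB and marking its mm NO_CONTEXT so
 * it will re-allocate on its next switch_mm().
 */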
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
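
/*
 * Illustration (values made up): an I/O mapping whose full 36-bit
 * physical address has 0xf in its top nibble is created with
 * bus_type == 0xf.  Because the PTE stores physaddr >> 4, bits 31:28
 * of the entry are free, and "tmp |= (bus_type << 28)" above parks the
 * four extra physical address bits there.
 */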

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0	/* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {
		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}
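
/*
 * A note on the inline asm above: the lda from ASI_M_FLUSH_PROBE with
 * "vaddr | 0x400" is the SRMMU probe-entire operation; it returns the
 * matching PTE, or 0 on a miss, without taking a fault.
 * srmmu_inherit_prom_mappings() below relies on this to read the
 * PROM's mappings indirectly.
 */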

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if (!(prompte = srmmu_probe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
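
/*
 * In srmmu_inherit_prom_mappings() above, the sizing logic is: if the
 * PTE probed at "start" is also reported one page below
 * start + SRMMU_REAL_PMD_SIZE (256 KB) or start + SRMMU_PGDIR_SIZE
 * (16 MB), the PROM mapped the range with a single level-2 or level-1
 * entry, and it is inherited at that same level rather than page by
 * page.  do_large_mapping() below uses KERNEL_PTE to install such a
 * 16 MB entry directly; map_kernel() starts with
 * do_large_mapping(PAGE_OFFSET, phys_base) to cover the first chunk
 * of the kernel's linear mapping.
 */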

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void map_kernel(void)
{
	int i;

	do_large_mapping(PAGE_OFFSET, phys_base);

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
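
/*
 * Example of the resulting /proc/cpuinfo fragment (illustrative values
 * only):
 *
 *	MMU type	: TI Tsunami
 *	contexts	: 8
 *	nocache total	: 1048576
 *	nocache used	: 40960
 */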

void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts".
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}

static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}
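
/*
 * The pattern above repeats in the routines below: copy the mm's cpu
 * mask, drop the local cpu, cross-call only if some other cpu actually
 * has this mm loaded, then do the local flush directly rather than
 * cross-calling ourselves.
 */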

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}

static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}

static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all	= smp_flush_cache_all,
	.cache_mm	= smp_flush_cache_mm,
	.cache_page	= smp_flush_cache_page,
	.cache_range	= smp_flush_cache_range,
	.tlb_all	= smp_flush_tlb_all,
	.tlb_mm		= smp_flush_tlb_mm,
	.tlb_page	= smp_flush_tlb_page,
	.tlb_range	= smp_flush_tlb_range,
	.page_to_ram	= smp_flush_page_to_ram,
	.sig_insns	= smp_flush_sig_insns,
	.page_for_dma	= smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);

	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}