1 /* $Id: srmmu.c,v 1.175 1998/08/28 18:57:31 zaitcev Exp $
2 * srmmu.c: SRMMU specific routines for memory management.
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
10 #include <linux/config.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/malloc.h>
14 #include <linux/vmalloc.h>
15 #include <linux/init.h>
17 #include <asm/page.h>
18 #include <asm/pgtable.h>
19 #include <asm/io.h>
20 #include <asm/kdebug.h>
21 #include <asm/vaddrs.h>
22 #include <asm/traps.h>
23 #include <asm/smp.h>
24 #include <asm/mbus.h>
25 #include <asm/cache.h>
26 #include <asm/oplib.h>
27 #include <asm/sbus.h>
28 #include <asm/asi.h>
29 #include <asm/msi.h>
30 #include <asm/a.out.h>
31 #include <asm/mmu_context.h>
32 #include <asm/io-unit.h>
33 #include <asm/spinlock.h>
35 /* Now the cpu specific definitions. */
36 #include <asm/viking.h>
37 #include <asm/mxcc.h>
38 #include <asm/ross.h>
39 #include <asm/tsunami.h>
40 #include <asm/swift.h>
41 #include <asm/turbosparc.h>
43 #include <asm/btfixup.h>
45 /* #define DEBUG_MAP_KERNEL */
46 /* #define PAGESKIP_DEBUG */
48 enum mbus_module srmmu_modtype;
49 unsigned int hwbug_bitmask;
50 int vac_cache_size;
51 int vac_line_size;
52 int vac_badbits;
54 extern unsigned long sparc_iobase_vaddr;
56 #ifdef __SMP__
57 #define FLUSH_BEGIN(mm)
58 #define FLUSH_END
59 #else
60 #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
61 #define FLUSH_END }
62 #endif
64 static int phys_mem_contig;
65 BTFIXUPDEF_SETHI(page_contig_offset)
67 BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
68 BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
70 #define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
71 #define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
73 BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
74 BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)
76 #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
77 int flush_page_for_dma_global = 1;
78 #define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
79 #ifdef __SMP__
80 BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
82 #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
83 #endif
85 static struct srmmu_stats {
86 int invall;
87 int invpg;
88 int invrnge;
89 int invmm;
90 } module_stats;
92 char *srmmu_name;
94 ctxd_t *srmmu_ctx_table_phys;
95 ctxd_t *srmmu_context_table;
97 /* Don't change this without changing access to this
98  * in arch/sparc/mm/viking.S
99  */
100 static struct srmmu_trans {
101 unsigned long vbase;
102 unsigned long pbase;
103 unsigned long size;
104 } srmmu_map[SPARC_PHYS_BANKS];
106 #define SRMMU_HASHSZ 256
108 /* Not static, viking.S uses it. */
109 unsigned long srmmu_v2p_hash[SRMMU_HASHSZ];
110 static unsigned long srmmu_p2v_hash[SRMMU_HASHSZ];
112 #define srmmu_ahashfn(addr) ((addr) >> 24)
114 int viking_mxcc_present = 0;
116 /* Physical memory can be _very_ non-contiguous on the sun4m, especially
117  * the SS10/20 class machines and with the latest openprom revisions.
118  * So we have to do a quick lookup.
119  * We use the same for SS1000/SC2000 as a fallback when phys memory is
120  * non-contiguous.
121  */
122 static inline unsigned long srmmu_v2p(unsigned long vaddr)
124 unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];
126 return (vaddr + off);
129 static inline unsigned long srmmu_p2v(unsigned long paddr)
131 unsigned long off = srmmu_p2v_hash[srmmu_ahashfn(paddr)];
133 if (off != 0xffffffffUL)
134 return (paddr - off);
135 else
136 return 0xffffffffUL;
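/* Both lookups above cost one table read plus an add/subtract: map_kernel()
 * records, for every 16MB-aligned chunk of each mapped bank, the bank's
 * (pbase - vbase) delta in srmmu_v2p_hash[] and srmmu_p2v_hash[], indexed by
 * the top byte of the address via srmmu_ahashfn().  For example, a bank with
 * vbase 0xf0000000 and pbase 0x00000000 puts -0xf0000000 into
 * srmmu_v2p_hash[0xf0], so srmmu_v2p(0xf0123000) yields 0x00123000.
 * 0xffffffffUL in the p2v table marks a physical address outside any bank.
 */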
139 /* Physical memory on most SS1000/SC2000 can be contiguous, so we handle it
140  * as a special case to make things faster.
141  */
142 /* FIXME: gcc is stupid here and generates very very bad code in this
143 * heavily used routine. So we help it a bit. */
144 static inline unsigned long srmmu_c_v2p(unsigned long vaddr)
146 #if KERNBASE != 0xf0000000
147 if (vaddr >= KERNBASE) return vaddr - KERNBASE;
148 return vaddr - BTFIXUP_SETHI(page_contig_offset);
149 #else
150 register unsigned long kernbase;
152 __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
153 return vaddr - ((vaddr >= kernbase) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
154 #endif
157 static inline unsigned long srmmu_c_p2v(unsigned long paddr)
159 #if KERNBASE != 0xf0000000
160 if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE;
161 return (paddr + BTFIXUP_SETHI(page_contig_offset));
162 #else
163 register unsigned long kernbase;
164 register unsigned long limit;
166 __asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit));
167 __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
169 return paddr + ((paddr < limit) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
170 #endif
173 /* On boxes where there is no lots_of_ram, KERNBASE is mapped to PA<0> and highest
174 PA is below 0x0d000000, we can optimize even more :) */
175 static inline unsigned long srmmu_s_v2p(unsigned long vaddr)
177 return vaddr - PAGE_OFFSET;
180 static inline unsigned long srmmu_s_p2v(unsigned long paddr)
182 return paddr + PAGE_OFFSET;
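/* Which of the three translation flavours is actually used is patched in at
 * the end of map_kernel(): the hash-based pair above is the general case,
 * the _c_ variants are installed (via btfixup) when physical memory turns
 * out to be contiguous, and the _s_ variants when srmmu_low_pa is set, i.e.
 * RAM starts at physical 0 and is linearly mapped, so only an add/subtract
 * of PAGE_OFFSET is needed.
 */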
185 /* In general all page table modifications should use the V8 atomic
186  * swap instruction.  This ensures the mmu and the cpu are in sync
187  * with respect to ref/mod bits in the page tables.
188  */
189 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
191 __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
192 return value;
195 /* Functions really use this, not srmmu_swap directly. */
196 #define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
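/* A minimal sketch of what a pte store expands to in the plain cacheable
 * case (the module-specific set_pte variants below add cache flushing):
 *
 *	set_pte(ptep, pteval)
 *	  -> srmmu_set_pte_cacheable(ptep, pteval)
 *	  -> srmmu_set_entry(ptep, pte_val(pteval))
 *	  -> srmmu_swap((unsigned long *)ptep, value)	i.e. "swap [ptep], %reg"
 *
 * The old entry, including any ref/mod bits the MMU set meanwhile, comes
 * back in the register instead of being silently overwritten.
 */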
198 #ifdef PAGESKIP_DEBUG
199 #define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
200 #else
201 #define PGSKIP_DEBUG(from,to) do { } while (0)
202 #endif
204 __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
206 unsigned long bank_start, bank_end = 0;
207 unsigned long addr;
208 int i;
210 /* First, mark all pages as invalid. */
211 for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
212 mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
214 /* Next, pg[0-3] is sun4c cruft, so we can free it... */
215 mem_map[MAP_NR(pg0)].flags &= ~(1<<PG_reserved);
216 mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
217 mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
218 mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);
220 start_mem = PAGE_ALIGN(start_mem);
221 for(i = 0; srmmu_map[i].size; i++) {
222 bank_start = srmmu_map[i].vbase;
224 if (i && bank_start - bank_end > 2 * PAGE_SIZE) {
225 mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
226 mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
227 PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
228 if (bank_end > KERNBASE && bank_start < KERNBASE) {
229 mem_map[0].flags |= (1<<PG_skip);
230 mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
231 PGSKIP_DEBUG(0, MAP_NR(bank_start));
235 bank_end = bank_start + srmmu_map[i].size;
236 while(bank_start < bank_end) {
237 if((bank_start >= KERNBASE) &&
238 (bank_start < start_mem)) {
239 bank_start += PAGE_SIZE;
240 continue;
242 mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
243 bank_start += PAGE_SIZE;
246 if (bank_end == 0xfd000000)
247 bank_end = PAGE_OFFSET;
250 if (bank_end < KERNBASE) {
251 mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
252 mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
253 PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
254 } else if (MAP_NR(bank_end) < max_mapnr) {
255 mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
256 if (mem_map[0].flags & (1 << PG_skip)) {
257 mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
258 PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
259 } else {
260 mem_map[MAP_NR(bank_end)].next_hash = mem_map;
261 PGSKIP_DEBUG(MAP_NR(bank_end), 0);
266 /* The very generic SRMMU page table operations. */
267 static inline int srmmu_device_memory(unsigned long x)
269 return ((x & 0xF0000000) != 0);
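/* The physical-address field of an SRMMU pte/ptd is the 36-bit physical
 * address shifted right by 4, so bits 31:28 of the raw entry correspond to
 * PA bits 35:32.  Those are only non-zero for I/O space (see
 * srmmu_mk_pte_io(), which ORs in "space << 28"), hence the test above;
 * the *_page() helpers below return ~0 for such device memory rather than
 * trying to translate it back to a kernel virtual address.
 */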
272 static unsigned long srmmu_pgd_page(pgd_t pgd)
273 { return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
275 static unsigned long srmmu_pmd_page(pmd_t pmd)
276 { return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
278 static unsigned long srmmu_pte_page(pte_t pte)
279 { return srmmu_device_memory(pte_val(pte))?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
281 static unsigned long srmmu_c_pgd_page(pgd_t pgd)
282 { return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_c_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
284 static unsigned long srmmu_c_pmd_page(pmd_t pmd)
285 { return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_c_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
287 static unsigned long srmmu_c_pte_page(pte_t pte)
288 { return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
290 static unsigned long srmmu_s_pgd_page(pgd_t pgd)
291 { return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_s_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
293 static unsigned long srmmu_s_pmd_page(pmd_t pmd)
294 { return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_s_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
296 static unsigned long srmmu_s_pte_page(pte_t pte)
297 { return srmmu_device_memory(pte_val(pte))?~0:srmmu_s_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
299 static inline int srmmu_pte_none(pte_t pte)
300 { return !(pte_val(pte) & 0xFFFFFFF); }
301 static inline int srmmu_pte_present(pte_t pte)
302 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
304 static inline void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
306 static inline int srmmu_pmd_none(pmd_t pmd)
307 { return !(pmd_val(pmd) & 0xFFFFFFF); }
308 static inline int srmmu_pmd_bad(pmd_t pmd)
309 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
311 static inline int srmmu_pmd_present(pmd_t pmd)
312 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
314 static inline void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
316 static inline int srmmu_pgd_none(pgd_t pgd)
317 { return !(pgd_val(pgd) & 0xFFFFFFF); }
319 static inline int srmmu_pgd_bad(pgd_t pgd)
320 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
322 static inline int srmmu_pgd_present(pgd_t pgd)
323 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
325 static inline void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
327 static inline int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
328 static inline int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
329 static inline int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
331 static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
332 static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
333 static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);}
334 static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);}
335 static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);}
336 static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);}
338 /*
339  * Conversion functions: convert a page and protection to a page entry,
340  * and a page entry and page directory to the page they refer to.
341  */
342 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
343 { return __pte(((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); }
345 static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot)
346 { return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); }
348 static pte_t srmmu_s_mk_pte(unsigned long page, pgprot_t pgprot)
349 { return __pte(((srmmu_s_v2p(page)) >> 4) | pgprot_val(pgprot)); }
351 static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
352 { return __pte(((page) >> 4) | pgprot_val(pgprot)); }
354 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
356 return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
359 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
361 set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
364 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
366 set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
369 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
371 set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
374 static void srmmu_c_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
376 set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pgdp) >> 4)));
379 static void srmmu_c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
381 set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pmdp) >> 4)));
384 static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep)
386 set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4)));
389 static void srmmu_s_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
391 set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pgdp) >> 4)));
394 static void srmmu_s_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
396 set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pmdp) >> 4)));
399 static void srmmu_s_pmd_set(pmd_t * pmdp, pte_t * ptep)
401 set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) ptep) >> 4)));
404 static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
406 return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
409 /* to find an entry in a top-level page table... */
410 static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
412 return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
415 /* Find an entry in the second-level page table.. */
416 static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
418 return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
421 /* Find an entry in the third-level page table.. */
422 static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
424 return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
427 static inline pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
429 return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
432 static inline pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
434 return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
437 static inline pmd_t *srmmu_s_pmd_offset(pgd_t * dir, unsigned long address)
439 return (pmd_t *) srmmu_s_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
442 static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
444 return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
447 /* This must update the context table entry for this process. */
448 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
450 if(tsk->mm->context != NO_CONTEXT) {
451 flush_cache_mm(tsk->mm);
452 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
453 flush_tlb_mm(tsk->mm);
457 static inline pte_t *srmmu_get_pte_fast(void)
459 struct page *ret;
461 spin_lock(&pte_spinlock);
462 if ((ret = (struct page *)pte_quicklist) != NULL) {
463 unsigned int mask = (unsigned int)ret->pprev_hash;
464 unsigned int tmp, off;
466 if (mask & 0xff)
467 for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
468 else
469 for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
470 (unsigned int)ret->pprev_hash = mask & ~tmp;
471 if (!(mask & ~tmp))
472 pte_quicklist = (unsigned long *)ret->next_hash;
473 ret = (struct page *)(page_address(ret) + off);
474 pgtable_cache_size--;
476 spin_unlock(&pte_spinlock);
477 return (pte_t *)ret;
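/* Note on the quicklist encoding used above and below: one 4K page holds
 * sixteen 256-byte SRMMU pte tables (64 four-byte entries each).  The
 * page's pprev_hash field is reused as a 16-bit free-chunk bitmap and
 * next_hash links the pages on pte_quicklist.  srmmu_get_pte_slow() hands
 * out chunk 0 and queues the page with mask 0xfffe, which is why it bumps
 * pgtable_cache_size by 15; srmmu_pte_free() sets the chunk's bit again
 * from bits 11:8 of the pte address.
 */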
480 static inline pte_t *srmmu_get_pte_slow(void)
482 pte_t *ret;
483 struct page *page;
485 ret = (pte_t *)get_free_page(GFP_KERNEL);
486 if (ret) {
487 page = mem_map + MAP_NR(ret);
488 flush_chunk((unsigned long)ret);
489 (unsigned int)page->pprev_hash = 0xfffe;
490 spin_lock(&pte_spinlock);
491 (unsigned long *)page->next_hash = pte_quicklist;
492 pte_quicklist = (unsigned long *)page;
493 pgtable_cache_size += 15;
495 return ret;
498 static inline pgd_t *srmmu_get_pgd_fast(void)
500 struct page *ret;
502 spin_lock(&pgd_spinlock);
503 if ((ret = (struct page *)pgd_quicklist) != NULL) {
504 unsigned int mask = (unsigned int)ret->pprev_hash;
505 unsigned int tmp, off;
507 for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024);
508 (unsigned int)ret->pprev_hash = mask & ~tmp;
509 if (!(mask & ~tmp))
510 pgd_quicklist = (unsigned long *)ret->next_hash;
511 ret = (struct page *)(page_address(ret) + off);
512 pgd_cache_size--;
514 spin_unlock(&pgd_spinlock);
515 return (pgd_t *)ret;
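/* Same scheme for pgds, just with 1K chunks: a page holds four 1024-byte
 * page directories (256 entries), srmmu_get_pgd_slow() keeps chunk 0 and
 * queues the page with mask 0xe (three still free, hence pgd_cache_size
 * += 3), and srmmu_pgd_free() recomputes the chunk index from bits 11:10
 * of the pgd address.
 */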
518 static inline pgd_t *srmmu_get_pgd_slow(void)
520 pgd_t *ret;
521 struct page *page;
523 ret = (pgd_t *)__get_free_page(GFP_KERNEL);
524 if (ret) {
525 pgd_t *init = pgd_offset(&init_mm, 0);
526 memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
527 memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
528 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
529 memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
530 memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
531 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
532 memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
533 memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
534 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
535 memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
536 memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
537 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
538 page = mem_map + MAP_NR(ret);
539 flush_chunk((unsigned long)ret);
540 (unsigned int)page->pprev_hash = 0xe;
541 spin_lock(&pgd_spinlock);
542 (unsigned long *)page->next_hash = pgd_quicklist;
543 pgd_quicklist = (unsigned long *)page;
544 pgd_cache_size += 3;
545 spin_unlock(&pgd_spinlock);
547 return ret;
550 static void srmmu_free_pte_slow(pte_t *pte)
554 static void srmmu_free_pgd_slow(pgd_t *pgd)
558 static inline void srmmu_pte_free(pte_t *pte)
560 struct page *page = mem_map + MAP_NR(pte);
562 spin_lock(&pte_spinlock);
563 if (!page->pprev_hash) {
564 (unsigned long *)page->next_hash = pte_quicklist;
565 pte_quicklist = (unsigned long *)page;
567 (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15));
568 pgtable_cache_size++;
569 spin_unlock(&pte_spinlock);
572 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
574 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
575 if(srmmu_pmd_none(*pmd)) {
576 pte_t *page = srmmu_get_pte_fast();
578 if (page) {
579 pmd_set(pmd, page);
580 return page + address;
582 page = srmmu_get_pte_slow();
583 if(srmmu_pmd_none(*pmd)) {
584 if(page) {
585 spin_unlock(&pte_spinlock);
586 pmd_set(pmd, page);
587 return page + address;
589 pmd_set(pmd, BAD_PAGETABLE);
590 return NULL;
592 if (page) {
593 (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
594 pgtable_cache_size++;
595 spin_unlock(&pte_spinlock);
598 if(srmmu_pmd_bad(*pmd)) {
599 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
600 pmd_set(pmd, BAD_PAGETABLE);
601 return NULL;
603 return ((pte_t *) pmd_page(*pmd)) + address;
606 /* Real three-level page tables on SRMMU. */
607 static void srmmu_pmd_free(pmd_t * pmd)
609 return srmmu_pte_free((pte_t *)pmd);
612 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
614 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
615 if(srmmu_pgd_none(*pgd)) {
616 pmd_t *page = (pmd_t *)srmmu_get_pte_fast();
618 if (page) {
619 pgd_set(pgd, page);
620 return page + address;
622 page = (pmd_t *)srmmu_get_pte_slow();
623 if(srmmu_pgd_none(*pgd)) {
624 if(page) {
625 spin_unlock(&pte_spinlock);
626 pgd_set(pgd, page);
627 return page + address;
629 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
630 return NULL;
632 if (page) {
633 (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
634 pgtable_cache_size++;
635 spin_unlock(&pte_spinlock);
638 if(srmmu_pgd_bad(*pgd)) {
639 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
640 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
641 return NULL;
643 return (pmd_t *) pgd_page(*pgd) + address;
646 static void srmmu_pgd_free(pgd_t *pgd)
648 struct page *page = mem_map + MAP_NR(pgd);
650 spin_lock(&pgd_spinlock);
651 if (!page->pprev_hash) {
652 (unsigned long *)page->next_hash = pgd_quicklist;
653 pgd_quicklist = (unsigned long *)page;
655 (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3));
656 pgd_cache_size++;
657 spin_unlock(&pgd_spinlock);
660 static pgd_t *srmmu_pgd_alloc(void)
662 pgd_t *ret;
664 ret = srmmu_get_pgd_fast();
665 if (ret) return ret;
666 return srmmu_get_pgd_slow();
670 static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
672 struct task_struct * p;
673 struct page *page;
675 read_lock(&tasklist_lock);
676 for_each_task(p) {
677 if (!p->mm)
678 continue;
679 *pgd_offset(p->mm,address) = entry;
681 read_unlock(&tasklist_lock);
682 spin_lock(&pgd_spinlock);
683 address >>= SRMMU_PGDIR_SHIFT;
684 for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
685 pgd_t *pgd = (pgd_t *)page_address(page);
686 unsigned int mask = (unsigned int)page->pprev_hash;
688 if (mask & 1)
689 pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry;
690 if (mask & 2)
691 pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry;
692 if (mask & 4)
693 pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry;
694 if (mask & 8)
695 pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry;
696 if (mask)
697 flush_chunk((unsigned long)pgd);
699 spin_unlock(&pgd_spinlock);
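/* srmmu_set_pgdir() propagates a change to a kernel pgd slot into every
 * task's page directory and also into the preconstructed pgds still sitting
 * on pgd_quicklist; the free-chunk bitmap says which of the four pgd copies
 * in each cached page are free and therefore need the update before they
 * can be handed out again.
 */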
702 static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
704 srmmu_set_entry(ptep, pte_val(pteval));
707 static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
709 register unsigned long a, b, c, d, e, f, g;
710 unsigned long line, page;
712 srmmu_set_entry(ptep, pte_val(pteval));
713 page = ((unsigned long)ptep) & PAGE_MASK;
714 line = (page + PAGE_SIZE) - 0x100;
715 a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
716 goto inside;
717 do {
718 line -= 0x100;
719 inside:
720 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
721 "sta %%g0, [%0 + %2] %1\n\t"
722 "sta %%g0, [%0 + %3] %1\n\t"
723 "sta %%g0, [%0 + %4] %1\n\t"
724 "sta %%g0, [%0 + %5] %1\n\t"
725 "sta %%g0, [%0 + %6] %1\n\t"
726 "sta %%g0, [%0 + %7] %1\n\t"
727 "sta %%g0, [%0 + %8] %1\n\t" : :
728 "r" (line),
729 "i" (ASI_M_FLUSH_PAGE),
730 "r" (a), "r" (b), "r" (c), "r" (d),
731 "r" (e), "r" (f), "r" (g));
732 } while(line != page);
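/* The loop above walks backwards over the page containing the pte in
 * 0x100-byte steps; each iteration issues eight ASI_M_FLUSH_PAGE stores
 * 0x20 bytes (one cache line) apart, so after the entry is written the
 * whole page is flushed from the write-back Cypress cache.  Roughly:
 *
 *	for (line = page + PAGE_SIZE - 0x20; line >= page; line -= 0x20)
 *		sta %g0, [line] ASI_M_FLUSH_PAGE
 */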
735 static void srmmu_set_pte_nocache_viking(pte_t *ptep, pte_t pteval)
737 unsigned long vaddr;
738 int set;
739 int i;
741 set = ((unsigned long)ptep >> 5) & 0x7f;
742 vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
743 srmmu_set_entry(ptep, pte_val(pteval));
744 for (i = 0; i < 8; i++) {
745 __asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
746 vaddr += PAGE_SIZE;
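/* The Viking variant takes a different approach: it computes which cache
 * set the pte address indexes (bits 11:5) and then loads from eight kernel
 * addresses, one page apart, that fall into the same set.  The loads are
 * discarded into %g0; their only purpose is to displace any cached copy of
 * the line holding the just-written pte.
 */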
750 static void srmmu_quick_kernel_fault(unsigned long address)
752 #ifdef __SMP__
753 printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
754 smp_processor_id(), address);
755 while (1) ;
756 #else
757 printk("Kernel faults at addr=0x%08lx\n", address);
758 printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
759 die_if_kernel("SRMMU bolixed...", current->tss.kregs);
760 #endif
763 static inline void alloc_context(struct mm_struct *mm)
765 struct ctx_list *ctxp;
767 ctxp = ctx_free.next;
768 if(ctxp != &ctx_free) {
769 remove_from_ctx_list(ctxp);
770 add_to_used_ctxlist(ctxp);
771 mm->context = ctxp->ctx_number;
772 ctxp->ctx_mm = mm;
773 return;
775 ctxp = ctx_used.next;
776 if(ctxp->ctx_mm == current->mm)
777 ctxp = ctxp->next;
778 if(ctxp == &ctx_used)
779 panic("out of mmu contexts");
780 flush_cache_mm(ctxp->ctx_mm);
781 flush_tlb_mm(ctxp->ctx_mm);
782 remove_from_ctx_list(ctxp);
783 add_to_used_ctxlist(ctxp);
784 ctxp->ctx_mm->context = NO_CONTEXT;
785 ctxp->ctx_mm = mm;
786 mm->context = ctxp->ctx_number;
789 static inline void free_context(int context)
791 struct ctx_list *ctx_old;
793 ctx_old = ctx_list_pool + context;
794 remove_from_ctx_list(ctx_old);
795 add_to_free_ctxlist(ctx_old);
799 static void srmmu_switch_to_context(struct task_struct *tsk)
801 if(tsk->mm->context == NO_CONTEXT) {
802 alloc_context(tsk->mm);
803 flush_cache_mm(tsk->mm);
804 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
805 flush_tlb_mm(tsk->mm);
807 srmmu_set_context(tsk->mm->context);
810 static void srmmu_init_new_context(struct mm_struct *mm)
812 alloc_context(mm);
814 flush_cache_mm(mm);
815 ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
816 flush_tlb_mm(mm);
818 if(mm == current->mm)
819 srmmu_set_context(mm->context);
822 /* Low level IO area allocation on the SRMMU. */
823 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
825 pgd_t *pgdp;
826 pmd_t *pmdp;
827 pte_t *ptep;
828 unsigned long tmp;
830 physaddr &= PAGE_MASK;
831 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
832 pmdp = pmd_offset(pgdp, virt_addr);
833 ptep = pte_offset(pmdp, virt_addr);
834 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
836 /* I need to test whether this is consistent over all
837  * sun4m's.  The bus_type represents the upper 4 bits of
838  * the 36-bit physical address on the I/O space lines...
839  */
840 tmp |= (bus_type << 28);
841 if(rdonly)
842 tmp |= SRMMU_PRIV_RDONLY;
843 else
844 tmp |= SRMMU_PRIV;
845 flush_page_to_ram(virt_addr);
846 set_pte(ptep, __pte(tmp));
847 flush_tlb_all();
850 void srmmu_unmapioaddr(unsigned long virt_addr)
852 pgd_t *pgdp;
853 pmd_t *pmdp;
854 pte_t *ptep;
856 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
857 pmdp = pmd_offset(pgdp, virt_addr);
858 ptep = pte_offset(pmdp, virt_addr);
860 /* No need to flush uncacheable page. */
861 set_pte(ptep, mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
862 flush_tlb_all();
865 /* This is used in many routines below. */
866 #define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
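/* The usual offsetof() idiom: "dereference" a task_struct at address 0 and
 * take the address of tss.uwinmask, which evaluates to the field's byte
 * offset as a compile-time constant.
 */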
868 /* On the SRMMU we do not have the problems with limited tlb entries
869  * for mapping kernel pages, so we just take things from the free page
870  * pool.  As a side effect we are putting a little too much pressure
871  * on the gfp() subsystem.  This setup also makes the logic of the
872  * iommu mapping code a lot easier as we can transparently handle
873  * mappings on the kernel stack without the special code we needed
874  * on the sun4c.
875  */
876 struct task_struct *srmmu_alloc_task_struct(void)
878 return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1);
881 static void srmmu_free_task_struct(struct task_struct *tsk)
883 free_pages((unsigned long)tsk, 1);
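/* An order-1 allocation is two contiguous physical pages, i.e. 8K with the
 * 4K sparc page size; the task_struct and the task's kernel stack are
 * presumed to share this chunk, which is why the comment above talks about
 * handling mappings on the kernel stack transparently.
 */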
886 /* tsunami.S */
887 extern void tsunami_flush_cache_all(void);
888 extern void tsunami_flush_cache_mm(struct mm_struct *mm);
889 extern void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
890 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
891 extern void tsunami_flush_page_to_ram(unsigned long page);
892 extern void tsunami_flush_page_for_dma(unsigned long page);
893 extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
894 extern void tsunami_flush_chunk(unsigned long chunk);
895 extern void tsunami_flush_tlb_all(void);
896 extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
897 extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
898 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
900 /* Workaround, until we find out what's going on with Swift.  When low on memory,
901  * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page, only to find
902  * the entry already in the page tables, then faults again on the same instruction.
903  * I really don't understand it, have checked it and contexts are right,
904  * flush_tlb_all is done as well, and it faults again... Strange. -jj */
905 static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
907 static unsigned long last;
909 if (last == address) viking_hwprobe(address);
910 last = address;
913 /* Swift flushes.  It has the recommended SRMMU specification flushing
914  * facilities, so we can do things in a more fine-grained fashion than we
915  * could on the tsunami.  Let's watch out for HARDWARE BUGS...
916  */
918 static void swift_flush_cache_all(void)
920 flush_user_windows();
921 swift_idflash_clear();
924 static void swift_flush_cache_mm(struct mm_struct *mm)
926 FLUSH_BEGIN(mm)
927 flush_user_windows();
928 swift_idflash_clear();
929 FLUSH_END
932 static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
934 FLUSH_BEGIN(mm)
935 flush_user_windows();
936 swift_idflash_clear();
937 FLUSH_END
940 static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
942 FLUSH_BEGIN(vma->vm_mm)
943 flush_user_windows();
944 if(vma->vm_flags & VM_EXEC)
945 swift_flush_icache();
946 swift_flush_dcache();
947 FLUSH_END
950 /* Not copy-back on swift. */
951 static void swift_flush_page_to_ram(unsigned long page)
955 /* But not IO coherent either. */
956 static void swift_flush_page_for_dma(unsigned long page)
958 swift_flush_dcache();
961 /* Again, Swift is non-snooping split I/D cache'd just like tsunami,
962  * so we have to punt the icache for on-stack signal insns.  Only the
963  * icache need be flushed since the dcache is write-through.
964  */
965 static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
967 swift_flush_icache();
970 static void swift_flush_chunk(unsigned long chunk)
974 static void swift_flush_tlb_all(void)
976 srmmu_flush_whole_tlb();
977 module_stats.invall++;
980 static void swift_flush_tlb_mm(struct mm_struct *mm)
982 FLUSH_BEGIN(mm)
983 srmmu_flush_whole_tlb();
984 module_stats.invmm++;
985 FLUSH_END
988 static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
990 FLUSH_BEGIN(mm)
991 srmmu_flush_whole_tlb();
992 module_stats.invrnge++;
993 FLUSH_END
996 static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
998 FLUSH_BEGIN(vma->vm_mm)
999 srmmu_flush_whole_tlb();
1000 module_stats.invpg++;
1001 FLUSH_END
1004 /* The following are all MBUS based SRMMU modules, and therefore could
1005  * be found in a multiprocessor configuration.  On the whole, these
1006  * chips seem to be much more touchy about DVMA and page tables
1007  * with respect to cache coherency.
1008  */
1010 /* Cypress flushes. */
1011 static void cypress_flush_cache_all(void)
1013 volatile unsigned long cypress_sucks;
1014 unsigned long faddr, tagval;
1016 flush_user_windows();
1017 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
1018 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
1019 "=r" (tagval) :
1020 "r" (faddr), "r" (0x40000),
1021 "i" (ASI_M_DATAC_TAG));
1023 /* If modified and valid, kick it. */
1024 if((tagval & 0x60) == 0x60)
1025 cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
1029 static void cypress_flush_cache_mm(struct mm_struct *mm)
1031 register unsigned long a, b, c, d, e, f, g;
1032 unsigned long flags, faddr;
1033 int octx;
1035 FLUSH_BEGIN(mm)
1036 flush_user_windows();
1037 __save_and_cli(flags);
1038 octx = srmmu_get_context();
1039 srmmu_set_context(mm->context);
1040 a = 0x20; b = 0x40; c = 0x60;
1041 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
1043 faddr = (0x10000 - 0x100);
1044 goto inside;
1045 do {
1046 faddr -= 0x100;
1047 inside:
1048 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
1049 "sta %%g0, [%0 + %2] %1\n\t"
1050 "sta %%g0, [%0 + %3] %1\n\t"
1051 "sta %%g0, [%0 + %4] %1\n\t"
1052 "sta %%g0, [%0 + %5] %1\n\t"
1053 "sta %%g0, [%0 + %6] %1\n\t"
1054 "sta %%g0, [%0 + %7] %1\n\t"
1055 "sta %%g0, [%0 + %8] %1\n\t" : :
1056 "r" (faddr), "i" (ASI_M_FLUSH_CTX),
1057 "r" (a), "r" (b), "r" (c), "r" (d),
1058 "r" (e), "r" (f), "r" (g));
1059 } while(faddr);
1060 srmmu_set_context(octx);
1061 __restore_flags(flags);
1062 FLUSH_END
1065 static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1067 register unsigned long a, b, c, d, e, f, g;
1068 unsigned long flags, faddr;
1069 int octx;
1071 FLUSH_BEGIN(mm)
1072 flush_user_windows();
1073 __save_and_cli(flags);
1074 octx = srmmu_get_context();
1075 srmmu_set_context(mm->context);
1076 a = 0x20; b = 0x40; c = 0x60;
1077 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
1079 start &= SRMMU_PMD_MASK;
1080 while(start < end) {
1081 faddr = (start + (0x10000 - 0x100));
1082 goto inside;
1083 do {
1084 faddr -= 0x100;
1085 inside:
1086 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
1087 "sta %%g0, [%0 + %2] %1\n\t"
1088 "sta %%g0, [%0 + %3] %1\n\t"
1089 "sta %%g0, [%0 + %4] %1\n\t"
1090 "sta %%g0, [%0 + %5] %1\n\t"
1091 "sta %%g0, [%0 + %6] %1\n\t"
1092 "sta %%g0, [%0 + %7] %1\n\t"
1093 "sta %%g0, [%0 + %8] %1\n\t" : :
1094 "r" (faddr),
1095 "i" (ASI_M_FLUSH_SEG),
1096 "r" (a), "r" (b), "r" (c), "r" (d),
1097 "r" (e), "r" (f), "r" (g));
1098 } while (faddr != start);
1099 start += SRMMU_PMD_SIZE;
1101 srmmu_set_context(octx);
1102 __restore_flags(flags);
1103 FLUSH_END
1106 static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1108 register unsigned long a, b, c, d, e, f, g;
1109 struct mm_struct *mm = vma->vm_mm;
1110 unsigned long flags, line;
1111 int octx;
1113 FLUSH_BEGIN(mm)
1114 flush_user_windows();
1115 __save_and_cli(flags);
1116 octx = srmmu_get_context();
1117 srmmu_set_context(mm->context);
1118 a = 0x20; b = 0x40; c = 0x60;
1119 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
1121 page &= PAGE_MASK;
1122 line = (page + PAGE_SIZE) - 0x100;
1123 goto inside;
1124 do {
1125 line -= 0x100;
1126 inside:
1127 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
1128 "sta %%g0, [%0 + %2] %1\n\t"
1129 "sta %%g0, [%0 + %3] %1\n\t"
1130 "sta %%g0, [%0 + %4] %1\n\t"
1131 "sta %%g0, [%0 + %5] %1\n\t"
1132 "sta %%g0, [%0 + %6] %1\n\t"
1133 "sta %%g0, [%0 + %7] %1\n\t"
1134 "sta %%g0, [%0 + %8] %1\n\t" : :
1135 "r" (line),
1136 "i" (ASI_M_FLUSH_PAGE),
1137 "r" (a), "r" (b), "r" (c), "r" (d),
1138 "r" (e), "r" (f), "r" (g));
1139 } while(line != page);
1140 srmmu_set_context(octx);
1141 __restore_flags(flags);
1142 FLUSH_END
1145 /* Cypress is copy-back, at least that is how we configure it. */
1146 static void cypress_flush_page_to_ram(unsigned long page)
1148 register unsigned long a, b, c, d, e, f, g;
1149 unsigned long line;
1151 a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
1152 page &= PAGE_MASK;
1153 line = (page + PAGE_SIZE) - 0x100;
1154 goto inside;
1155 do {
1156 line -= 0x100;
1157 inside:
1158 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
1159 "sta %%g0, [%0 + %2] %1\n\t"
1160 "sta %%g0, [%0 + %3] %1\n\t"
1161 "sta %%g0, [%0 + %4] %1\n\t"
1162 "sta %%g0, [%0 + %5] %1\n\t"
1163 "sta %%g0, [%0 + %6] %1\n\t"
1164 "sta %%g0, [%0 + %7] %1\n\t"
1165 "sta %%g0, [%0 + %8] %1\n\t" : :
1166 "r" (line),
1167 "i" (ASI_M_FLUSH_PAGE),
1168 "r" (a), "r" (b), "r" (c), "r" (d),
1169 "r" (e), "r" (f), "r" (g));
1170 } while(line != page);
1173 static void cypress_flush_chunk(unsigned long chunk)
1175 cypress_flush_page_to_ram(chunk);
1178 /* Cypress is also IO cache coherent. */
1179 static void cypress_flush_page_for_dma(unsigned long page)
1183 /* Cypress has a unified L2 VIPT cache in which both instructions and data
1184  * are stored.  It does not have an onboard icache of any sort, therefore
1185  * no flush is necessary.
1186  */
1187 static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1191 static void cypress_flush_tlb_all(void)
1193 srmmu_flush_whole_tlb();
1194 module_stats.invall++;
1197 static void cypress_flush_tlb_mm(struct mm_struct *mm)
1199 FLUSH_BEGIN(mm)
1200 __asm__ __volatile__("
1201 lda [%0] %3, %%g5
1202 sta %2, [%0] %3
1203 sta %%g0, [%1] %4
1204 sta %%g5, [%0] %3"
1205 : /* no outputs */
1206 : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
1207 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
1208 : "g5");
1209 module_stats.invmm++;
1210 FLUSH_END
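/* In these Cypress TLB flushes the context register is temporarily switched
 * to the target mm (old value parked in %g5) and a store to
 * ASI_M_FLUSH_PROBE does the actual flush; the low bits of the store
 * address select the scope: 0x300 above flushes the whole context, 0x200
 * in cypress_flush_tlb_range flushes one 16MB region per loop iteration,
 * and the bare page address in cypress_flush_tlb_page flushes that single
 * page.
 */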
1213 static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1215 unsigned long size;
1217 FLUSH_BEGIN(mm)
1218 start &= SRMMU_PGDIR_MASK;
1219 size = SRMMU_PGDIR_ALIGN(end) - start;
1220 __asm__ __volatile__("
1221 lda [%0] %5, %%g5
1222 sta %1, [%0] %5
1223 1: subcc %3, %4, %3
1224 bne 1b
1225 sta %%g0, [%2 + %3] %6
1226 sta %%g5, [%0] %5"
1227 : /* no outputs */
1228 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
1229 "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
1230 "i" (ASI_M_FLUSH_PROBE)
1231 : "g5", "cc");
1232 module_stats.invrnge++;
1233 FLUSH_END
1236 static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1238 struct mm_struct *mm = vma->vm_mm;
1240 FLUSH_BEGIN(mm)
1241 __asm__ __volatile__("
1242 lda [%0] %3, %%g5
1243 sta %1, [%0] %3
1244 sta %%g0, [%2] %4
1245 sta %%g5, [%0] %3"
1246 : /* no outputs */
1247 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
1248 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
1249 : "g5");
1250 module_stats.invpg++;
1251 FLUSH_END
1254 /* viking.S */
1255 extern void viking_flush_cache_all(void);
1256 extern void viking_flush_cache_mm(struct mm_struct *mm);
1257 extern void viking_flush_cache_range(struct mm_struct *mm, unsigned long start,
1258 unsigned long end);
1259 extern void viking_flush_cache_page(struct vm_area_struct *vma,
1260 unsigned long page);
1261 extern void viking_flush_page_to_ram(unsigned long page);
1262 extern void viking_flush_page_for_dma(unsigned long page);
1263 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
1264 extern void viking_flush_page(unsigned long page);
1265 extern void viking_mxcc_flush_page(unsigned long page);
1266 extern void viking_flush_chunk(unsigned long chunk);
1267 extern void viking_c_flush_chunk(unsigned long chunk);
1268 extern void viking_s_flush_chunk(unsigned long chunk);
1269 extern void viking_mxcc_flush_chunk(unsigned long chunk);
1270 extern void viking_flush_tlb_all(void);
1271 extern void viking_flush_tlb_mm(struct mm_struct *mm);
1272 extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
1273 unsigned long end);
1274 extern void viking_flush_tlb_page(struct vm_area_struct *vma,
1275 unsigned long page);
1277 /* hypersparc.S */
1278 extern void hypersparc_flush_cache_all(void);
1279 extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
1280 extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
1281 extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
1282 extern void hypersparc_flush_page_to_ram(unsigned long page);
1283 extern void hypersparc_flush_chunk(unsigned long chunk);
1284 extern void hypersparc_flush_page_for_dma(unsigned long page);
1285 extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
1286 extern void hypersparc_flush_tlb_all(void);
1287 extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
1288 extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
1289 extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
1290 extern void hypersparc_setup_blockops(void);
1292 static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
1294 unsigned long page = ((unsigned long)ptep) & PAGE_MASK;
1296 srmmu_set_entry(ptep, pte_val(pteval));
1297 hypersparc_flush_page_to_ram(page);
1300 static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
1302 srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4))));
1303 hypersparc_flush_page_to_ram((unsigned long)ctxp);
1304 hyper_flush_whole_icache();
1307 static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
1309 unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
1311 if(pgdp != swapper_pg_dir)
1312 hypersparc_flush_page_to_ram(page);
1314 if(tsk->mm->context != NO_CONTEXT) {
1315 flush_cache_mm(tsk->mm);
1316 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1317 flush_tlb_mm(tsk->mm);
1321 static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
1323 viking_flush_page((unsigned long)pgdp);
1324 if(tsk->mm->context != NO_CONTEXT) {
1325 flush_cache_mm(current->mm);
1326 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1327 flush_tlb_mm(current->mm);
1331 static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
1333 register unsigned long a, b, c, d, e, f, g;
1334 unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
1335 unsigned long line;
1337 a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
1338 page &= PAGE_MASK;
1339 line = (page + PAGE_SIZE) - 0x100;
1340 goto inside;
1341 do {
1342 line -= 0x100;
1343 inside:
1344 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
1345 "sta %%g0, [%0 + %2] %1\n\t"
1346 "sta %%g0, [%0 + %3] %1\n\t"
1347 "sta %%g0, [%0 + %4] %1\n\t"
1348 "sta %%g0, [%0 + %5] %1\n\t"
1349 "sta %%g0, [%0 + %6] %1\n\t"
1350 "sta %%g0, [%0 + %7] %1\n\t"
1351 "sta %%g0, [%0 + %8] %1\n\t" : :
1352 "r" (line),
1353 "i" (ASI_M_FLUSH_PAGE),
1354 "r" (a), "r" (b), "r" (c), "r" (d),
1355 "r" (e), "r" (f), "r" (g));
1356 } while(line != page);
1358 if(tsk->mm->context != NO_CONTEXT) {
1359 flush_cache_mm(current->mm);
1360 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1361 flush_tlb_mm(current->mm);
1365 static void hypersparc_switch_to_context(struct task_struct *tsk)
1367 if(tsk->mm->context == NO_CONTEXT) {
1368 ctxd_t *ctxp;
1370 alloc_context(tsk->mm);
1371 ctxp = &srmmu_context_table[tsk->mm->context];
1372 srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4))));
1373 hypersparc_flush_page_to_ram((unsigned long)ctxp);
1375 hyper_flush_whole_icache();
1376 srmmu_set_context(tsk->mm->context);
1379 static void hypersparc_init_new_context(struct mm_struct *mm)
1381 ctxd_t *ctxp;
1383 alloc_context(mm);
1385 ctxp = &srmmu_context_table[mm->context];
1386 srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
1387 hypersparc_flush_page_to_ram((unsigned long)ctxp);
1389 hyper_flush_whole_icache();
1390 if(mm == current->mm)
1391 srmmu_set_context(mm->context);
1394 static unsigned long mempool;
1396 /* NOTE: All of this startup code assumes the low 16mb (approx.) of
1397  * kernel mappings are done with one single contiguous chunk of
1398  * ram.  On small ram machines (classics mainly) we only get
1399  * around 8mb mapped for us.
1400  */
1402 static unsigned long kbpage;
1404 /* Some dirty hacks to abstract away the painful boot up init. */
1405 static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
1407 return ((vaddr - KERNBASE) + kbpage);
1410 static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
1412 set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4))));
1415 static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
1417 set_pte((pte_t *)pmdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4))));
1420 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
1422 return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
1425 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
1427 return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
1430 static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
1432 return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
1435 static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
1437 return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
1440 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
1442 pgd_t *pgdp;
1443 pmd_t *pmdp;
1444 pte_t *ptep;
1446 while(start < end) {
1447 pgdp = srmmu_pgd_offset(init_task.mm, start);
1448 if(srmmu_pgd_none(*pgdp)) {
1449 pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1450 srmmu_early_pgd_set(pgdp, pmdp);
1452 pmdp = srmmu_early_pmd_offset(pgdp, start);
1453 if(srmmu_pmd_none(*pmdp)) {
1454 ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1455 srmmu_early_pmd_set(pmdp, ptep);
1457 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
1461 /* This is much cleaner than poking around physical address space
1462  * looking at the prom's page table directly, which is what most
1463  * other OS's do.  Yuck... this is much better.
1464  */
1465 __initfunc(void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end))
1467 pgd_t *pgdp;
1468 pmd_t *pmdp;
1469 pte_t *ptep;
1470 int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
1471 unsigned long prompte;
1473 while(start <= end) {
1474 if (start == 0)
1475 break; /* probably wrap around */
1476 if(start == 0xfef00000)
1477 start = KADB_DEBUGGER_BEGVM;
1478 if(!(prompte = srmmu_hwprobe(start))) {
1479 start += PAGE_SIZE;
1480 continue;
1483 /* A red snapper, see what it really is. */
1484 what = 0;
1486 if(!(start & ~(SRMMU_PMD_MASK))) {
1487 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
1488 what = 1;
1491 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1492 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1493 prompte)
1494 what = 2;
1497 pgdp = srmmu_pgd_offset(init_task.mm, start);
1498 if(what == 2) {
1499 *pgdp = __pgd(prompte);
1500 start += SRMMU_PGDIR_SIZE;
1501 continue;
1503 if(srmmu_pgd_none(*pgdp)) {
1504 pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1505 srmmu_early_pgd_set(pgdp, pmdp);
1507 pmdp = srmmu_early_pmd_offset(pgdp, start);
1508 if(what == 1) {
1509 *pmdp = __pmd(prompte);
1510 start += SRMMU_PMD_SIZE;
1511 continue;
1513 if(srmmu_pmd_none(*pmdp)) {
1514 ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1515 srmmu_early_pmd_set(pmdp, ptep);
1517 ptep = srmmu_early_pte_offset(pmdp, start);
1518 *ptep = __pte(prompte);
1519 start += PAGE_SIZE;
1523 #ifdef DEBUG_MAP_KERNEL
1524 #define MKTRACE(foo) prom_printf foo
1525 #else
1526 #define MKTRACE(foo)
1527 #endif
1529 static int lots_of_ram __initdata = 0;
1530 static int srmmu_low_pa __initdata = 0;
1531 static unsigned long end_of_phys_memory __initdata = 0;
1533 __initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *end_mem_p))
1535 unsigned int sum = 0;
1536 unsigned long last = 0xff000000;
1537 long first, cur;
1538 unsigned long pa;
1539 unsigned long total = 0;
1540 int i;
1542 pa = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1543 pa = (pa & SRMMU_PTE_PMASK) << 4;
1544 if (!sp_banks[0].base_addr && pa == PAGE_SIZE) {
1545 for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1546 if (sp_banks[i].base_addr + sp_banks[i].num_bytes > 0x0d000000)
1547 break;
1549 if (!sp_banks[i].num_bytes) {
1550 srmmu_low_pa = 1;
1551 end_of_phys_memory = SRMMU_PGDIR_ALIGN(sp_banks[i-1].base_addr + sp_banks[i-1].num_bytes);
1552 *end_mem_p = KERNBASE + end_of_phys_memory;
1553 if (sp_banks[0].num_bytes >= (6 * 1024 * 1024) || end_of_phys_memory <= 0x06000000) {
1554 /* Make sure there will be enough memory for the whole mem_map (even if sparse) */
1555 return;
1559 for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1560 pa = sp_banks[i].base_addr;
1561 first = (pa & (~SRMMU_PGDIR_MASK));
1562 cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
1563 if (cur < 0) cur = 0;
1564 if (!first || last != (pa & SRMMU_PGDIR_MASK))
1565 total += SRMMU_PGDIR_SIZE;
1566 sum += sp_banks[i].num_bytes;
1567 if (memory_size) {
1568 if (sum > memory_size) {
1569 sp_banks[i].num_bytes -=
1570 (sum - memory_size);
1571 cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
1572 if (cur < 0) cur = 0;
1573 total += SRMMU_PGDIR_ALIGN(cur);
1574 sum = memory_size;
1575 sp_banks[++i].base_addr = 0xdeadbeef;
1576 sp_banks[i].num_bytes = 0;
1577 break;
1580 total += SRMMU_PGDIR_ALIGN(cur);
1581 last = (sp_banks[i].base_addr + sp_banks[i].num_bytes - 1) & SRMMU_PGDIR_MASK;
1583 if (total <= 0x0d000000)
1584 *end_mem_p = KERNBASE + total;
1585 else {
1586 *end_mem_p = 0xfd000000;
1587 lots_of_ram = 1;
1589 end_of_phys_memory = total;
1592 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
1594 /* Create a third-level SRMMU 16MB page mapping. */
1595 __initfunc(static void do_large_mapping(unsigned long vaddr, unsigned long phys_base))
1597 pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
1598 unsigned long big_pte;
1600 MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
1601 big_pte = KERNEL_PTE(phys_base >> 4);
1602 *pgdp = __pgd(big_pte);
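/* Writing a leaf pte straight into the pgd slot like this yields one SRMMU
 * mapping covering a full 16MB region, so whole memory banks get mapped
 * without allocating any pmd or pte tables at all.
 */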
1605 /* Look in sp_banks for the given physical page; return the index
1606  * at which the entry was found, or -1 if not found.
1607  */
1608 static inline int find_in_spbanks(unsigned long phys_page)
1610 int entry;
1612 for(entry = 0; sp_banks[entry].num_bytes; entry++) {
1613 unsigned long start = sp_banks[entry].base_addr;
1614 unsigned long end = start + sp_banks[entry].num_bytes;
1616 if((start <= phys_page) && (phys_page < end))
1617 return entry;
1619 return -1;
1622 /* Find an spbank entry not yet mapped.  TAKEN_VECTOR is an
1623  * array of chars, each member indicating whether that spbank
1624  * has been mapped yet or not.
1625  */
1626 __initfunc(static int find_free_spbank(char *taken_vector))
1628 int entry;
1630 for(entry = 0; sp_banks[entry].num_bytes; entry++)
1631 if(!taken_vector[entry])
1632 break;
1633 return entry;
1636 static unsigned long map_spbank_last_pa __initdata = 0xff000000;
1638 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
1639  */
1640 __initfunc(static unsigned long map_spbank(unsigned long vbase, int sp_entry))
1642 unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
1643 unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
1644 unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
1645 static int srmmu_bank = 0;
1647 MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry, vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
1648 MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry, pstart, vstart, vend));
1649 while(vstart < vend) {
1650 do_large_mapping(vstart, pstart);
1651 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
1653 srmmu_map[srmmu_bank].vbase = vbase;
1654 srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
1655 srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
1656 srmmu_bank++;
1657 map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE;
1658 return vstart;
1661 static inline void memprobe_error(char *msg)
1663 prom_printf(msg);
1664 prom_printf("Halting now...\n");
1665 prom_halt();
1668 /* Assumptions: The bank given to the kernel from the prom/bootloader
1669  * is part of a full bank which is at least 4MB in size and begins at
1670  * 0xf0000000 (i.e. KERNBASE).
1671  */
1672 static inline void map_kernel(void)
1674 unsigned long raw_pte, physpage;
1675 unsigned long vaddr, low_base;
1676 char etaken[SPARC_PHYS_BANKS];
1677 int entry;
1679 /* Step 1: Clear out sp_banks taken map. */
1680 MKTRACE(("map_kernel: clearing etaken vector... "));
1681 for(entry = 0; entry < SPARC_PHYS_BANKS; entry++)
1682 etaken[entry] = 0;
1684 low_base = KERNBASE;
1686 /* Step 2: Fill in KERNBASE base pgd. Lots of sanity checking here. */
1687 raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1688 if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
1689 memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
1690 physpage = (raw_pte & SRMMU_PTE_PMASK) << 4;
1691 physpage -= PAGE_SIZE;
1692 if(physpage & ~(SRMMU_PGDIR_MASK))
1693 memprobe_error("Wheee, kernel not mapped on 16MB physical boundary.\n");
1694 entry = find_in_spbanks(physpage);
1695 if(entry == -1 || (sp_banks[entry].base_addr != physpage))
1696 memprobe_error("Kernel mapped in non-existent memory.\n");
1697 MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
1698 if (sp_banks[entry].num_bytes > 0x0d000000) {
1699 unsigned long orig_base = sp_banks[entry].base_addr;
1700 unsigned long orig_len = sp_banks[entry].num_bytes;
1701 unsigned long can_map = 0x0d000000;
1703 /* Map a partial bank in this case, adjust the base
1704  * and the length, but don't mark it used.
1705  */
1706 sp_banks[entry].num_bytes = can_map;
1707 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
1708 vaddr = map_spbank(KERNBASE, entry);
1709 MKTRACE(("vaddr now %08lx ", vaddr));
1710 sp_banks[entry].base_addr = orig_base + can_map;
1711 sp_banks[entry].num_bytes = orig_len - can_map;
1712 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
1713 MKTRACE(("map_kernel: skipping first loop\n"));
1714 goto loop_skip;
1716 vaddr = map_spbank(KERNBASE, entry);
1717 etaken[entry] = 1;
1719 /* Step 3: Map what we can above KERNBASE. */
1720 MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
1721 for(;;) {
1722 unsigned long bank_size;
1724 MKTRACE(("map_kernel: ffsp()"));
1725 entry = find_free_spbank(&etaken[0]);
1726 bank_size = sp_banks[entry].num_bytes;
1727 MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
1728 if(!bank_size)
1729 break;
1730 if (srmmu_low_pa)
1731 vaddr = KERNBASE + sp_banks[entry].base_addr;
1732 else if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
1733 if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
1734 vaddr -= SRMMU_PGDIR_SIZE;
1735 vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
1737 if ((vaddr + bank_size - KERNBASE) > 0x0d000000) {
1738 unsigned long orig_base = sp_banks[entry].base_addr;
1739 unsigned long orig_len = sp_banks[entry].num_bytes;
1740 unsigned long can_map = (0xfd000000 - vaddr);
1742 /* Map a partial bank in this case, adjust the base
1743  * and the length, but don't mark it used.
1744  */
1745 sp_banks[entry].num_bytes = can_map;
1746 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
1747 vaddr = map_spbank(vaddr, entry);
1748 MKTRACE(("vaddr now %08lx ", vaddr));
1749 sp_banks[entry].base_addr = orig_base + can_map;
1750 sp_banks[entry].num_bytes = orig_len - can_map;
1751 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
1752 break;
1755 /* Ok, we can map this one, do it. */
1756 MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
1757 vaddr = map_spbank(vaddr, entry);
1758 etaken[entry] = 1;
1759 MKTRACE(("vaddr now %08lx\n", vaddr));
1761 MKTRACE(("\n"));
1762 /* If not lots_of_ram, assume we did indeed map it all above. */
1763 loop_skip:
1764 if(!lots_of_ram)
1765 goto check_and_return;
1767 /* Step 4: Map the rest (if any) right below KERNBASE. */
1768 MKTRACE(("map_kernel: doing low mappings... "));
1769 low_base = (KERNBASE - end_of_phys_memory + 0x0d000000);
1770 MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory, low_base));
1772 /* Ok, now map 'em. */
1773 MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
1774 srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
1775 vaddr = low_base;
1776 map_spbank_last_pa = 0xff000000;
1777 MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
1778 for(;;) {
1779 unsigned long bank_size;
1781 entry = find_free_spbank(&etaken[0]);
1782 bank_size = sp_banks[entry].num_bytes;
1783 MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
1784 if(!bank_size)
1785 break;
1786 if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
1787 if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
1788 vaddr -= SRMMU_PGDIR_SIZE;
1789 vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
1791 if((vaddr + bank_size) > KERNBASE)
1792 memprobe_error("Wheee, kernel low mapping overflow.\n");
1793 MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
1794 vaddr = map_spbank(vaddr, entry);
1795 etaken[entry] = 1;
1796 MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr, end_of_phys_memory));
1798 MKTRACE(("\n"));
1800 check_and_return:
1801 /* Step 5: Sanity check, make sure we did it all. */
1802 MKTRACE(("check_and_return: "));
1803 for(entry = 0; sp_banks[entry].num_bytes; entry++) {
1804 MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
1805 if(!etaken[entry]) {
1806 MKTRACE(("oops\n"));
1807 memprobe_error("Some bank did not get mapped.\n");
1810 MKTRACE(("success\n"));
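/* PAGE_OFFSET is not a compile-time constant on srmmu; publish the
 * value we ended up with (low_base) through btfixup so page_offset,
 * stack_top and user_ptrs_per_pgd come out right for everybody else.
 */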
1811 init_task.mm->mmap->vm_start = page_offset = low_base;
1812 stack_top = page_offset - PAGE_SIZE;
1813 BTFIXUPSET_SETHI(page_offset, low_base);
1814 BTFIXUPSET_SETHI(stack_top, page_offset - PAGE_SIZE);
1815 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, page_offset / SRMMU_PGDIR_SIZE);
1817 #if 1
1818 for(entry = 0; srmmu_map[entry].size; entry++) {
1819 printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry,
1820 srmmu_map[entry].vbase,
1821 srmmu_map[entry].vbase + srmmu_map[entry].size,
1822 srmmu_map[entry].size,
1823 srmmu_map[entry].pbase);
1825 #endif
1827 /* Now setup the p2v/v2p hash tables. */
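/* One slot per 16MB of address space; each slot caches the
 * (pbase - vbase) delta of the bank covering that region, so a
 * translation is roughly
 *
 *   paddr = vaddr + srmmu_v2p_hash[vaddr >> 24];
 *
 * Slots with no bank behind them keep the defaults set just below,
 * which push unmapped virtual addresses up into 0xffxxxxxx physical
 * space (and unmapped physical addresses to 0xffffffff), presumably so
 * they do not alias onto real memory.
 */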
1828 for(entry = 0; entry < SRMMU_HASHSZ; entry++)
1829 srmmu_v2p_hash[entry] = ((0xff - entry) << 24);
1830 for(entry = 0; entry < SRMMU_HASHSZ; entry++)
1831 srmmu_p2v_hash[entry] = 0xffffffffUL;
1832 for(entry = 0; srmmu_map[entry].size; entry++) {
1833 unsigned long addr;
1835 for(addr = srmmu_map[entry].vbase;
1836 addr < (srmmu_map[entry].vbase + srmmu_map[entry].size);
1837 addr += (1 << 24))
1838 srmmu_v2p_hash[srmmu_ahashfn(addr)] =
1839 srmmu_map[entry].pbase - srmmu_map[entry].vbase;
1840 for(addr = srmmu_map[entry].pbase;
1841 addr < (srmmu_map[entry].pbase + srmmu_map[entry].size);
1842 addr += (1 << 24))
1843 srmmu_p2v_hash[srmmu_ahashfn(addr)] =
1844 srmmu_map[entry].pbase - srmmu_map[entry].vbase;
1847 BTFIXUPSET_SETHI(page_contig_offset, page_offset - (0xfd000000 - KERNBASE));
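/* Decide whether the hash lookup can be bypassed altogether.  If every
 * mapped bank already satisfies pbase == srmmu_c_v2p(vbase), memory is
 * contiguous as far as the srmmu_c_* helpers are concerned and those
 * (presumably constant-offset) translations get patched in below;
 * srmmu_low_pa selects the srmmu_s_* "straightforward" variants
 * instead.  Otherwise we stay with the hash tables set up above.
 */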
1848 if (srmmu_low_pa)
1849 phys_mem_contig = 0;
1850 else {
1851 phys_mem_contig = 1;
1852 for(entry = 0; srmmu_map[entry].size; entry++)
1853 if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) {
1854 phys_mem_contig = 0;
1855 break;
1858 if (phys_mem_contig) {
1859 printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n");
1860 BTFIXUPSET_CALL(pte_page, srmmu_c_pte_page, BTFIXUPCALL_NORM);
1861 BTFIXUPSET_CALL(pmd_page, srmmu_c_pmd_page, BTFIXUPCALL_NORM);
1862 BTFIXUPSET_CALL(pgd_page, srmmu_c_pgd_page, BTFIXUPCALL_NORM);
1863 BTFIXUPSET_CALL(mk_pte, srmmu_c_mk_pte, BTFIXUPCALL_NORM);
1864 BTFIXUPSET_CALL(pte_offset, srmmu_c_pte_offset, BTFIXUPCALL_NORM);
1865 BTFIXUPSET_CALL(pmd_offset, srmmu_c_pmd_offset, BTFIXUPCALL_NORM);
1866 if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
1867 BTFIXUPSET_CALL(ctxd_set, srmmu_c_ctxd_set, BTFIXUPCALL_NORM);
1868 BTFIXUPSET_CALL(pgd_set, srmmu_c_pgd_set, BTFIXUPCALL_NORM);
1869 BTFIXUPSET_CALL(pmd_set, srmmu_c_pmd_set, BTFIXUPCALL_NORM);
1870 BTFIXUPSET_CALL(mmu_v2p, srmmu_c_v2p, BTFIXUPCALL_NORM);
1871 BTFIXUPSET_CALL(mmu_p2v, srmmu_c_p2v, BTFIXUPCALL_NORM);
1872 if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
1873 BTFIXUPSET_CALL(flush_chunk, viking_c_flush_chunk, BTFIXUPCALL_NORM);
1874 } else if (srmmu_low_pa) {
1875 printk ("SRMMU: Compact physical memory. Using straightforward VA<->PA translations.\n");
1876 BTFIXUPSET_CALL(pte_page, srmmu_s_pte_page, BTFIXUPCALL_NORM);
1877 BTFIXUPSET_CALL(pmd_page, srmmu_s_pmd_page, BTFIXUPCALL_NORM);
1878 BTFIXUPSET_CALL(pgd_page, srmmu_s_pgd_page, BTFIXUPCALL_NORM);
1879 BTFIXUPSET_CALL(mk_pte, srmmu_s_mk_pte, BTFIXUPCALL_NORM);
1880 BTFIXUPSET_CALL(pte_offset, srmmu_s_pte_offset, BTFIXUPCALL_NORM);
1881 BTFIXUPSET_CALL(pmd_offset, srmmu_s_pmd_offset, BTFIXUPCALL_NORM);
1882 if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
1883 BTFIXUPSET_CALL(ctxd_set, srmmu_s_ctxd_set, BTFIXUPCALL_NORM);
1884 BTFIXUPSET_CALL(pgd_set, srmmu_s_pgd_set, BTFIXUPCALL_NORM);
1885 BTFIXUPSET_CALL(pmd_set, srmmu_s_pmd_set, BTFIXUPCALL_NORM);
1886 BTFIXUPSET_CALL(mmu_v2p, srmmu_s_v2p, BTFIXUPCALL_NORM);
1887 BTFIXUPSET_CALL(mmu_p2v, srmmu_s_p2v, BTFIXUPCALL_NORM);
1888 if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
1889 BTFIXUPSET_CALL(flush_chunk, viking_s_flush_chunk, BTFIXUPCALL_NORM);
1891 btfixup();
1893 return; /* SUCCESS! */
1896 /* Paging initialization on the Sparc Reference MMU. */
1897 extern unsigned long free_area_init(unsigned long, unsigned long);
1898 extern unsigned long sparc_context_init(unsigned long, int);
1900 extern int physmem_mapped_contig;
1901 extern int linux_num_cpus;
1903 void (*poke_srmmu)(void) __initdata = NULL;
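/* Boot-time MMU setup for sun4m/sun4d: size the context table from the
 * PROM cpu nodes (sun4d Vikings always have 65536 contexts), build the
 * kernel page tables via map_kernel() plus the IO/DVMA skeletons,
 * inherit the PROM's own mappings so prom_* calls keep working, point
 * every context at swapper_pg_dir, and finally switch the hardware
 * context table pointer over to our freshly built tables.
 */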
1905 __initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
1907 unsigned long ptables_start;
1908 int i, cpunode;
1909 char node_str[128];
1911 sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
1912 physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
1914 if (sparc_cpu_model == sun4d)
1915 num_contexts = 65536; /* We know it is Viking */
1916 else {
1917 /* Find the number of contexts on the srmmu. */
1918 cpunode = prom_getchild(prom_root_node);
1919 num_contexts = 0;
1920 while((cpunode = prom_getsibling(cpunode)) != 0) {
1921 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1922 if(!strcmp(node_str, "cpu")) {
1923 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1924 break;
1929 if(!num_contexts) {
1930 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1931 prom_halt();
1934 ptables_start = mempool = PAGE_ALIGN(start_mem);
1935 memset(swapper_pg_dir, 0, PAGE_SIZE);
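/* Recover the physical address the PROM mapped the kernel at: probe
 * the PTE for KERNBASE+PAGE_SIZE, shift its PPN field left by 4 to get
 * back a physical address, and back up one page to land on the
 * physical base of KERNBASE itself.
 */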
1936 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1937 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1938 kbpage -= PAGE_SIZE;
1940 srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1941 #if CONFIG_SUN_IO
1942 srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
1943 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1944 #endif
1946 mempool = PAGE_ALIGN(mempool);
1947 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1948 map_kernel();
1949 srmmu_context_table = sparc_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1950 srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
1951 for(i = 0; i < num_contexts; i++)
1952 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1954 start_mem = PAGE_ALIGN(mempool);
1956 flush_cache_all();
1957 if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) {
1958 unsigned long start = ptables_start;
1959 unsigned long end = start_mem;
1961 while(start < end) {
1962 viking_flush_page(start);
1963 start += PAGE_SIZE;
1966 srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1967 flush_tlb_all();
1968 poke_srmmu();
1970 start_mem = sparc_context_init(start_mem, num_contexts);
1971 start_mem = free_area_init(start_mem, end_mem);
1973 return PAGE_ALIGN(start_mem);
1976 static int srmmu_mmu_info(char *buf)
1978 return sprintf(buf,
1979 "MMU type\t: %s\n"
1980 "invall\t\t: %d\n"
1981 "invmm\t\t: %d\n"
1982 "invrnge\t\t: %d\n"
1983 "invpg\t\t: %d\n"
1984 "contexts\t: %d\n"
1985 , srmmu_name,
1986 module_stats.invall,
1987 module_stats.invmm,
1988 module_stats.invrnge,
1989 module_stats.invpg,
1990 num_contexts
1994 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1998 static void srmmu_destroy_context(struct mm_struct *mm)
2000 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2001 flush_cache_mm(mm);
2002 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
2003 flush_tlb_mm(mm);
2004 free_context(mm->context);
2005 mm->context = NO_CONTEXT;
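/* Virtual-cache (VAC) alias avoidance, used as update_mmu_cache on
 * HyperSparc and Cypress.  For a shared writable mapping, walk every
 * other mapping of the same inode; if another user address maps the
 * same data but differs in vac_badbits (so it would land on different
 * cache lines), mark those pages uncacheable, and then uncache the
 * page being faulted in as well.
 */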
2009 static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
2010 unsigned long address, pte_t pte)
2012 if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) {
2013 struct vm_area_struct *vmaring;
2014 struct file *file;
2015 struct inode *inode;
2016 unsigned long flags, offset, vaddr, start;
2017 int alias_found = 0;
2018 pgd_t *pgdp;
2019 pmd_t *pmdp;
2020 pte_t *ptep;
2022 __save_and_cli(flags);
2024 file = vma->vm_file;
2025 if (!file)
2026 goto done;
2027 inode = file->f_dentry->d_inode;
2028 offset = (address & PAGE_MASK) - vma->vm_start;
2029 vmaring = inode->i_mmap;
2030 do {
2031 vaddr = vmaring->vm_start + offset;
2033 if ((vaddr ^ address) & vac_badbits) {
2034 alias_found++;
2035 start = vmaring->vm_start;
2036 while (start < vmaring->vm_end) {
2037 pgdp = srmmu_pgd_offset(vmaring->vm_mm, start);
2038 if(!pgdp) goto next;
2039 pmdp = srmmu_pmd_offset(pgdp, start);
2040 if(!pmdp) goto next;
2041 ptep = srmmu_pte_offset(pmdp, start);
2042 if(!ptep) goto next;
2044 if((pte_val(*ptep) & SRMMU_ET_MASK) == SRMMU_VALID) {
2045 #if 1
2046 printk("Fixing USER/USER alias [%ld:%08lx]\n",
2047 vmaring->vm_mm->context, start);
2048 #endif
2049 flush_cache_page(vmaring, start);
2050 set_pte(ptep, __pte((pte_val(*ptep) &
2051 ~SRMMU_CACHE)));
2052 flush_tlb_page(vmaring, start);
2054 next:
2055 start += PAGE_SIZE;
2058 } while ((vmaring = vmaring->vm_next_share) != NULL);
2060 if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
2061 pgdp = srmmu_pgd_offset(vma->vm_mm, address);
2062 ptep = srmmu_pte_offset((pmd_t *) pgdp, address);
2063 flush_cache_page(vma, address);
2064 *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
2065 flush_tlb_page(vma, address);
2067 done:
2068 __restore_flags(flags);
2072 static void hypersparc_destroy_context(struct mm_struct *mm)
2074 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2075 ctxd_t *ctxp;
2077 /* HyperSparc is copy-back, any data for this
2078 * process in a modified cache line is stale
2079 * and must be written back to main memory now
2080 * else we eat shit later big time.
2082 flush_cache_mm(mm);
2084 ctxp = &srmmu_context_table[mm->context];
2085 srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) swapper_pg_dir) >> 4))));
2086 hypersparc_flush_page_to_ram((unsigned long)ctxp);
2088 flush_tlb_mm(mm);
2089 free_context(mm->context);
2090 mm->context = NO_CONTEXT;
2094 /* Init various srmmu chip types. */
2095 __initfunc(static void srmmu_is_bad(void))
2097 prom_printf("Could not determine SRMMU chip type.\n");
2098 prom_halt();
2101 __initfunc(static void init_vac_layout(void))
2103 int nd, cache_lines;
2104 char node_str[128];
2105 #ifdef __SMP__
2106 int cpu = 0;
2107 unsigned long max_size = 0;
2108 unsigned long min_line_size = 0x10000000;
2109 #endif
2111 nd = prom_getchild(prom_root_node);
2112 while((nd = prom_getsibling(nd)) != 0) {
2113 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
2114 if(!strcmp(node_str, "cpu")) {
2115 vac_line_size = prom_getint(nd, "cache-line-size");
2116 if (vac_line_size == -1) {
2117 prom_printf("can't determine cache-line-size, "
2118 "halting.\n");
2119 prom_halt();
2121 cache_lines = prom_getint(nd, "cache-nlines");
2122 if (cache_lines == -1) {
2123 prom_printf("can't determine cache-nlines, halting.\n");
2124 prom_halt();
2127 vac_cache_size = cache_lines * vac_line_size;
2128 vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
2129 #ifdef __SMP__
2130 if(vac_cache_size > max_size)
2131 max_size = vac_cache_size;
2132 if(vac_line_size < min_line_size)
2133 min_line_size = vac_line_size;
2134 cpu++;
2135 if(cpu == smp_num_cpus)
2136 break;
2137 #else
2138 break;
2139 #endif
2142 if(nd == 0) {
2143 prom_printf("No CPU nodes found, halting.\n");
2144 prom_halt();
2146 #ifdef __SMP__
2147 vac_cache_size = max_size;
2148 vac_line_size = min_line_size;
2149 vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
2150 #endif
2151 printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
2152 (int)vac_cache_size, (int)vac_line_size);
2155 __initfunc(static void poke_hypersparc(void))
2157 volatile unsigned long clear;
2158 unsigned long mreg = srmmu_get_mmureg();
2160 hyper_flush_unconditional_combined();
2162 mreg &= ~(HYPERSPARC_CWENABLE);
2163 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2164 mreg |= (HYPERSPARC_CMODE);
2166 srmmu_set_mmureg(mreg);
2168 #if 0 /* I think this is bad news... -DaveM */
2169 hyper_clear_all_tags();
2170 #endif
2172 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2173 hyper_flush_whole_icache();
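/* Reading the fault address/status registers clears any synchronous
 * fault state left behind by the cache/ICCR fiddling above.
 */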
2174 clear = srmmu_get_faddr();
2175 clear = srmmu_get_fstatus();
2178 __initfunc(static void init_hypersparc(void))
2180 srmmu_name = "ROSS HyperSparc";
2182 init_vac_layout();
2184 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM);
2185 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2186 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2187 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2188 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
2189 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
2190 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
2191 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
2193 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
2194 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
2195 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
2196 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
2198 BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
2199 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
2200 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
2202 BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2204 BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
2205 BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM);
2206 BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM);
2207 BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
2208 BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
2209 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM);
2210 poke_srmmu = poke_hypersparc;
2212 hypersparc_setup_blockops();
2215 __initfunc(static void poke_cypress(void))
2217 unsigned long mreg = srmmu_get_mmureg();
2218 unsigned long faddr, tagval;
2219 volatile unsigned long cypress_sucks;
2220 volatile unsigned long clear;
2222 clear = srmmu_get_faddr();
2223 clear = srmmu_get_fstatus();
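/* If the data cache is still off, simply invalidate every tag; if it
 * is already on, read each tag back and touch memory for any line that
 * is both valid and modified, apparently to force the dirty data out
 * before the cache mode bits are changed below.
 */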
2225 if (!(mreg & CYPRESS_CENABLE)) {
2226 for(faddr = 0x0; faddr < 0x10000; faddr += 0x20) {
2227 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
2228 "sta %%g0, [%0] %2\n\t" : :
2229 "r" (faddr), "r" (0x40000),
2230 "i" (ASI_M_DATAC_TAG));
2232 } else {
2233 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
2234 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
2235 "=r" (tagval) :
2236 "r" (faddr), "r" (0x40000),
2237 "i" (ASI_M_DATAC_TAG));
2239 /* If modified and valid, kick it. */
2240 if((tagval & 0x60) == 0x60)
2241 cypress_sucks = *(unsigned long *)
2242 (0xf0020000 + faddr);
2246 /* And one more, for our good neighbor, Mr. Broken Cypress. */
2247 clear = srmmu_get_faddr();
2248 clear = srmmu_get_fstatus();
2250 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
2251 srmmu_set_mmureg(mreg);
2254 __initfunc(static void init_cypress_common(void))
2256 init_vac_layout();
2258 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM);
2259 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2260 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2261 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2262 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
2263 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
2264 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
2265 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
2267 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
2268 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
2269 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
2270 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
2272 BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2274 BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
2275 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
2276 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
2277 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM);
2279 BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
2280 poke_srmmu = poke_cypress;
2283 __initfunc(static void init_cypress_604(void))
2285 srmmu_name = "ROSS Cypress-604(UP)";
2286 srmmu_modtype = Cypress;
2287 init_cypress_common();
2290 __initfunc(static void init_cypress_605(unsigned long mrev))
2292 srmmu_name = "ROSS Cypress-605(MP)";
2293 if(mrev == 0xe) {
2294 srmmu_modtype = Cypress_vE;
2295 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2296 } else {
2297 if(mrev == 0xd) {
2298 srmmu_modtype = Cypress_vD;
2299 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2300 } else {
2301 srmmu_modtype = Cypress;
2304 init_cypress_common();
2307 __initfunc(static void poke_swift(void))
2309 unsigned long mreg = srmmu_get_mmureg();
2311 /* Clear any crap from the cache or else... */
2312 swift_idflash_clear();
2313 mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
2315 /* The Swift branch folding logic is completely broken. At
2316 * trap time, if things are just right, it can mistakenly
2317 * think that a trap is coming from kernel mode when in fact
2318 * it is coming from user mode (it mis-executes the branch in
2319 * the trap code). So you see things like crashme completely
2320 * hosing your machine which is completely unacceptable. Turn
2321 * this shit off... nice job Fujitsu.
2323 mreg &= ~(SWIFT_BF);
2324 srmmu_set_mmureg(mreg);
2327 #define SWIFT_MASKID_ADDR 0x10003018
2328 __initfunc(static void init_swift(void))
2330 unsigned long swift_rev;
2332 __asm__ __volatile__("lda [%1] %2, %0\n\t"
2333 "srl %0, 0x18, %0\n\t" :
2334 "=r" (swift_rev) :
2335 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2336 srmmu_name = "Fujitsu Swift";
2337 switch(swift_rev) {
2338 case 0x11:
2339 case 0x20:
2340 case 0x23:
2341 case 0x30:
2342 srmmu_modtype = Swift_lots_o_bugs;
2343 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2344 /* Gee george, I wonder why Sun is so hush hush about
2345 * this hardware bug... really braindamaged stuff going
2346 * on here. However I think we can find a way to avoid
2347 * all of the workaround overhead under Linux. Basically,
2348 * any page fault can cause kernel pages to become user
2349 * accessible (the mmu gets confused and clears some of
2350 * the ACC bits in kernel ptes). Aha, sounds pretty
2351 * horrible eh? But wait, after extensive testing it appears
2352 * that if you use pgd_t level large kernel pte's (like the
2353 * 4MB pages on the Pentium) the bug does not get tripped
2354 * at all. This avoids almost all of the major overhead.
2355 * Welcome to a world where your vendor tells you to,
2356 * "apply this kernel patch" instead of "sorry for the
2357 * broken hardware, send it back and we'll give you
2358 * properly functioning parts"
2360 break;
2361 case 0x25:
2362 case 0x31:
2363 srmmu_modtype = Swift_bad_c;
2364 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2365 /* You see Sun allude to this hardware bug but never
2366 * admit things directly; they'll say things like,
2367 * "the Swift chip cache problems" or similar.
2369 break;
2370 default:
2371 srmmu_modtype = Swift_ok;
2372 break;
2375 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
2376 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
2377 BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
2378 BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
2380 BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2382 BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
2383 BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
2384 BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
2385 BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
2387 BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
2388 BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
2389 BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
2391 BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
2393 /* Are you now convinced that the Swift is one of the
2394 * biggest VLSI abortions of all time? Bravo Fujitsu!
2395 * Fujitsu, the !#?!%$'d up processor people. I bet if
2396 * you examined the microcode of the Swift you'd find
2397 * XXX's all over the place.
2399 poke_srmmu = poke_swift;
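/* TurboSparc: only whole-cache and whole-TLB flush primitives are used
 * here, so every range/page operation below degenerates into a full
 * flush.
 */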
2402 static void turbosparc_flush_cache_all(void)
2404 flush_user_windows();
2405 turbosparc_idflash_clear();
2408 static void turbosparc_flush_cache_mm(struct mm_struct *mm)
2410 FLUSH_BEGIN(mm)
2411 flush_user_windows();
2412 turbosparc_idflash_clear();
2413 FLUSH_END
2416 static void turbosparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
2418 FLUSH_BEGIN(mm)
2419 flush_user_windows();
2420 turbosparc_idflash_clear();
2421 FLUSH_END
2424 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
2426 FLUSH_BEGIN(vma->vm_mm)
2427 flush_user_windows();
2428 if (vma->vm_flags & VM_EXEC)
2429 turbosparc_flush_icache();
2430 turbosparc_flush_dcache();
2431 FLUSH_END
2434 /* TurboSparc is copy-back if we turn it on, but this does not work. */
2435 static void turbosparc_flush_page_to_ram(unsigned long page)
2437 #ifdef TURBOSPARC_WRITEBACK
2438 volatile unsigned long clear;
2440 if (srmmu_hwprobe(page))
2441 turbosparc_flush_page_cache(page);
2442 clear = srmmu_get_fstatus();
2443 #endif
2446 static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
2450 static void turbosparc_flush_page_for_dma(unsigned long page)
2452 turbosparc_flush_dcache();
2455 static void turbosparc_flush_chunk(unsigned long chunk)
2459 static void turbosparc_flush_tlb_all(void)
2461 srmmu_flush_whole_tlb();
2462 module_stats.invall++;
2465 static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
2467 FLUSH_BEGIN(mm)
2468 srmmu_flush_whole_tlb();
2469 module_stats.invmm++;
2470 FLUSH_END
2473 static void turbosparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
2475 FLUSH_BEGIN(mm)
2476 srmmu_flush_whole_tlb();
2477 module_stats.invrnge++;
2478 FLUSH_END
2481 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
2483 FLUSH_BEGIN(vma->vm_mm)
2484 srmmu_flush_whole_tlb();
2485 module_stats.invpg++;
2486 FLUSH_END
2490 __initfunc(static void poke_turbosparc(void))
2492 unsigned long mreg = srmmu_get_mmureg();
2493 unsigned long ccreg;
2495 /* Clear any crap from the cache or else... */
2496 turbosparc_flush_cache_all();
2497 mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
2498 mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
2499 srmmu_set_mmureg(mreg);
2501 ccreg = turbosparc_get_ccreg();
2503 #ifdef TURBOSPARC_WRITEBACK
2504 ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
2505 ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
2506 /* Write-back D-cache, emulate VLSI
2507 * abortion number three, not number one */
2508 #else
2509 /* For now let's play safe, optimize later */
2510 ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
2511 /* Do DVMA snooping in Dcache, Write-thru D-cache */
2512 ccreg &= ~(TURBOSPARC_uS2);
2513 /* Emulate VLSI abortion number three, not number one */
2514 #endif
2516 switch (ccreg & 7) {
2517 case 0: /* No SE cache */
2518 case 7: /* Test mode */
2519 break;
2520 default:
2521 ccreg |= (TURBOSPARC_SCENABLE);
2523 turbosparc_set_ccreg (ccreg);
2525 mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
2526 mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
2527 srmmu_set_mmureg(mreg);
2530 __initfunc(static void init_turbosparc(void))
2532 srmmu_name = "Fujitsu TurboSparc";
2533 srmmu_modtype = TurboSparc;
2535 BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
2536 BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
2537 BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
2538 BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
2540 BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
2541 BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
2542 BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
2543 BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
2545 BTFIXUPSET_CALL(flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
2546 BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM);
2548 BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
2549 BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP);
2551 poke_srmmu = poke_turbosparc;
2554 __initfunc(static void poke_tsunami(void))
2556 unsigned long mreg = srmmu_get_mmureg();
2558 tsunami_flush_icache();
2559 tsunami_flush_dcache();
2560 mreg &= ~TSUNAMI_ITD;
2561 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2562 srmmu_set_mmureg(mreg);
2565 __initfunc(static void init_tsunami(void))
2567 /* Tsunami's pretty sane, Sun and TI actually got it
2568 * somewhat right this time. Fujitsu should have
2569 * taken some lessons from them.
2572 srmmu_name = "TI Tsunami";
2573 srmmu_modtype = Tsunami;
2575 BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
2576 BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
2577 BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
2578 BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
2580 BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2582 BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
2583 BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
2584 BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
2585 BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
2587 BTFIXUPSET_CALL(flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
2588 BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
2589 BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
2591 poke_srmmu = poke_tsunami;
2594 __initfunc(static void poke_viking(void))
2596 unsigned long mreg = srmmu_get_mmureg();
2597 static int smp_catch = 0;
2599 if(viking_mxcc_present) {
2600 unsigned long mxcc_control = mxcc_get_creg();
2602 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2603 mxcc_control &= ~(MXCC_CTL_RRC);
2604 mxcc_set_creg(mxcc_control);
2606 /* We don't need memory parity checks.
2607 * XXX This is a mess, have to dig out later. ecd.
2608 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
2611 /* We do cache ptables on MXCC. */
2612 mreg |= VIKING_TCENABLE;
2613 } else {
2614 unsigned long bpreg;
2616 mreg &= ~(VIKING_TCENABLE);
2617 if(smp_catch++) {
2618 /* Must disable mixed-cmd mode here for
2619 * other cpu's.
2621 bpreg = viking_get_bpreg();
2622 bpreg &= ~(VIKING_ACTION_MIX);
2623 viking_set_bpreg(bpreg);
2625 /* Just in case PROM does something funny. */
2626 msi_set_sync();
2630 mreg |= VIKING_SPENABLE;
2631 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2632 mreg |= VIKING_SBENABLE;
2633 mreg &= ~(VIKING_ACENABLE);
2634 srmmu_set_mmureg(mreg);
2636 #ifdef __SMP__
2637 /* Avoid unnecessary cross calls. */
2638 BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
2639 BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
2640 BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
2641 BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
2642 BTFIXUPCOPY_CALL(flush_page_to_ram, local_flush_page_to_ram);
2643 BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
2644 BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
2645 btfixup();
2646 #endif
2649 __initfunc(static void init_viking(void))
2651 unsigned long mreg = srmmu_get_mmureg();
2653 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
2654 if(mreg & VIKING_MMODE) {
2655 unsigned long bpreg;
2657 srmmu_name = "TI Viking";
2658 viking_mxcc_present = 0;
2660 bpreg = viking_get_bpreg();
2661 bpreg &= ~(VIKING_ACTION_MIX);
2662 viking_set_bpreg(bpreg);
2664 msi_set_sync();
2666 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
2667 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2668 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2669 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2670 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM);
2672 BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2674 /* We need this to make sure old viking takes no hits
2675 * on its cache for dma snoops, to work around the
2676 * "load from non-cacheable memory" interrupt bug.
2677 * This is only necessary because of the new way in
2678 * which we use the IOMMU.
2680 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
2681 /* Also, this is so far the only chip which actually uses
2682 the page argument to flush_page_for_dma */
2683 flush_page_for_dma_global = 0;
2684 } else {
2685 srmmu_name = "TI Viking/MXCC";
2686 viking_mxcc_present = 1;
2688 BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2690 /* MXCC vikings lack the DMA snooping bug. */
2691 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
2694 /* flush_cache_* are nops */
2695 BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NOP);
2696 BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NOP);
2697 BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NOP);
2698 BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NOP);
2700 BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
2701 BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
2702 BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
2703 BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
2705 BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
2706 BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
2708 poke_srmmu = poke_viking;
2711 /* Probe for the srmmu chip version. */
2712 __initfunc(static void get_srmmu_type(void))
2714 unsigned long mreg, psr;
2715 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2717 srmmu_modtype = SRMMU_INVAL_MOD;
2718 hwbug_bitmask = 0;
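/* Both the SRMMU control register and the CPU's PSR carry 4-bit
 * implementation and version fields in bits 31-28 and 27-24; together
 * they identify the module closely enough to pick the right driver.
 */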
2720 mreg = srmmu_get_mmureg(); psr = get_psr();
2721 mod_typ = (mreg & 0xf0000000) >> 28;
2722 mod_rev = (mreg & 0x0f000000) >> 24;
2723 psr_typ = (psr >> 28) & 0xf;
2724 psr_vers = (psr >> 24) & 0xf;
2726 /* First, check for HyperSparc or Cypress. */
2727 if(mod_typ == 1) {
2728 switch(mod_rev) {
2729 case 7:
2730 /* UP or MP Hypersparc */
2731 init_hypersparc();
2732 break;
2733 case 0:
2734 case 2:
2735 /* Uniprocessor Cypress */
2736 init_cypress_604();
2737 break;
2738 case 10:
2739 case 11:
2740 case 12:
2741 /* _REALLY OLD_ Cypress MP chips... */
2742 case 13:
2743 case 14:
2744 case 15:
2745 /* MP Cypress mmu/cache-controller */
2746 init_cypress_605(mod_rev);
2747 break;
2748 default:
2749 /* Some other Cypress revision, assume a 605. */
2750 init_cypress_605(mod_rev);
2751 break;
2753 return;
2756 /* Now Fujitsu TurboSparc. It might happen that it is
2757 in Swift emulation mode, so we will check later... */
2758 if (psr_typ == 0 && psr_vers == 5) {
2759 init_turbosparc();
2760 return;
2763 /* Next check for Fujitsu Swift. */
2764 if(psr_typ == 0 && psr_vers == 4) {
2765 int cpunode;
2766 char node_str[128];
2768 /* Make sure it is not really a TurboSparc emulating a Swift... */
2769 cpunode = prom_getchild(prom_root_node);
2770 while((cpunode = prom_getsibling(cpunode)) != 0) {
2771 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
2772 if(!strcmp(node_str, "cpu")) {
2773 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
2774 prom_getintdefault(cpunode, "psr-version", 1) == 5) {
2775 init_turbosparc();
2776 return;
2778 break;
2782 init_swift();
2783 return;
2786 /* Now the Viking family of srmmu. */
2787 if(psr_typ == 4 &&
2788 ((psr_vers == 0) ||
2789 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2790 init_viking();
2791 return;
2794 /* Finally the Tsunami. */
2795 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2796 init_tsunami();
2797 return;
2800 /* Oh well */
2801 srmmu_is_bad();
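/* Trim the pte/pgd quicklists back towards the <low, high> watermarks.
 * Several SRMMU page tables share one physical page, and pprev_hash is
 * apparently used as a free-chunk bitmap: 0xffff means all 16 pte
 * tables in the page are free, 0xf means all 4 pgds are, and only such
 * fully-free pages may be handed back to the page allocator.
 */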
2804 static int srmmu_check_pgt_cache(int low, int high)
2806 struct page *page, *page2;
2807 int freed = 0;
2809 if (pgtable_cache_size > high) {
2810 spin_lock(&pte_spinlock);
2811 for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
2812 if ((unsigned int)page->pprev_hash == 0xffff) {
2813 if (page2)
2814 page2->next_hash = page->next_hash;
2815 else
2816 (struct page *)pte_quicklist = page->next_hash;
2817 page->next_hash = NULL;
2818 page->pprev_hash = NULL;
2819 pgtable_cache_size -= 16;
2820 __free_page(page);
2821 freed++;
2822 if (page2)
2823 page = page2->next_hash;
2824 else
2825 page = (struct page *)pte_quicklist;
2826 if (pgtable_cache_size <= low)
2827 break;
2828 continue;
2830 page2 = page;
2831 page = page->next_hash;
2833 spin_unlock(&pte_spinlock);
2835 if (pgd_cache_size > high / 4) {
2836 spin_lock(&pgd_spinlock);
2837 for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
2838 if ((unsigned int)page->pprev_hash == 0xf) {
2839 if (page2)
2840 page2->next_hash = page->next_hash;
2841 else
2842 (struct page *)pgd_quicklist = page->next_hash;
2843 page->next_hash = NULL;
2844 page->pprev_hash = NULL;
2845 pgd_cache_size -= 4;
2846 __free_page(page);
2847 freed++;
2848 if (page2)
2849 page = page2->next_hash;
2850 else
2851 page = (struct page *)pgd_quicklist;
2852 if (pgd_cache_size <= low / 4)
2853 break;
2854 continue;
2856 page2 = page;
2857 page = page->next_hash;
2859 spin_unlock(&pgd_spinlock);
2861 return freed;
2864 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2865 tsetup_mmu_patchme, rtrap_mmu_patchme;
2867 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2868 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2870 extern unsigned long srmmu_fault;
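/* Overwrite one instruction in the window/trap handlers with a branch
 * to the SRMMU-specific routine; SPARC_BRANCH() presumably encodes a
 * branch-always with the PC-relative displacement from iaddr to daddr.
 */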
2872 #define PATCH_BRANCH(insn, dest) do { \
2873 iaddr = &(insn); \
2874 daddr = &(dest); \
2875 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2876 } while(0)
2878 __initfunc(static void patch_window_trap_handlers(void))
2880 unsigned long *iaddr, *daddr;
2882 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2883 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2884 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2885 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2886 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2887 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2888 PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2891 #ifdef __SMP__
2892 /* Local cross-calls. */
2893 static void smp_flush_page_for_dma(unsigned long page)
2895 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
2898 #endif
2900 /* Load up routines and constants for sun4m and sun4d mmu */
2901 __initfunc(void ld_mmu_srmmu(void))
2903 extern void ld_mmu_iommu(void);
2904 extern void ld_mmu_iounit(void);
2905 extern void ___xchg32_sun4md(void);
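/* Everything below goes through btfixup: the generic pgtable, cache
 * and tlb entry points get patched at boot to the SRMMU flavour chosen
 * here, and get_srmmu_type() then overrides the chip-specific ones.
 */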
2907 /* First the constants */
2908 BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
2909 BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
2910 BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
2911 BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
2912 BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
2913 BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
2915 BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
2916 BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
2917 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
2919 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
2920 BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
2921 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
2922 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
2923 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
2924 pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2926 /* Functions */
2927 #ifndef __SMP__
2928 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
2929 #endif
2930 BTFIXUPSET_CALL(get_pte_fast, srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
2931 BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
2932 BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
2933 BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
2934 BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
2936 BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
2938 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1);
2939 BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM);
2940 BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM);
2942 BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
2943 BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
2944 BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
2946 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);
2948 BTFIXUPSET_SETHI(none_mask, 0xF0000000);
2950 BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
2951 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
2953 BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
2954 BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
2955 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
2957 BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
2958 BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
2959 BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
2960 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
2962 BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
2963 BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
2964 BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
2965 BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
2967 BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
2968 BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM);
2969 BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
2970 BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
2971 BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
2972 BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM);
2973 BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM);
2974 BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
2975 BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
2976 BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM);
2977 BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM);
2978 BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
2979 BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM);
2980 BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM);
2982 BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
2983 BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
2984 BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
2985 BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
2986 BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
2987 BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
2988 BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
2989 BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
2990 BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
2991 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
2992 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
2994 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
2995 BTFIXUPSET_CALL(mmu_v2p, srmmu_v2p, BTFIXUPCALL_NORM);
2996 BTFIXUPSET_CALL(mmu_p2v, srmmu_p2v, BTFIXUPCALL_NORM);
2998 /* Task struct and kernel stack allocating/freeing. */
2999 BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM);
3000 BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
3002 BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM);
3004 /* SRMMU specific. */
3005 BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
3006 BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
3008 get_srmmu_type();
3009 patch_window_trap_handlers();
3011 #ifdef __SMP__
3012 /* El switcheroo... */
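/* Keep the chip-local flush routines reachable under local_* names,
 * then reroute the generic entry points through the smp_* wrappers,
 * which cross-call the local_* versions on the other cpus.
 */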
3014 BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
3015 BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
3016 BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
3017 BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
3018 BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
3019 BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
3020 BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
3021 BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
3022 BTFIXUPCOPY_CALL(local_flush_page_to_ram, flush_page_to_ram);
3023 BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
3024 BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
3026 BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
3027 BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
3028 BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
3029 BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
3030 BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
3031 BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
3032 BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
3033 BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
3034 BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
3035 BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
3036 BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
3037 #endif
3038 if (sparc_cpu_model == sun4d)
3039 ld_mmu_iounit();
3040 else
3041 ld_mmu_iommu();
3042 #ifdef __SMP__
3043 if (sparc_cpu_model == sun4d)
3044 sun4d_init_smp();
3045 else
3046 sun4m_init_smp();
3047 #endif