/* $Id: srmmu.c,v 1.185 1999/03/24 11:42:35 davem Exp $
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/a.out.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/spinlock.h>
/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>

#include <asm/btfixup.h>

/* #define DEBUG_MAP_KERNEL */
/* #define PAGESKIP_DEBUG */

enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;
int vac_badbits;

extern unsigned long sparc_iobase_vaddr;

#ifdef __SMP__
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
static int phys_mem_contig;
BTFIXUPDEF_SETHI(page_contig_offset)

BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)

#define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)

#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
int flush_page_for_dma_global = 1;
#define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
#ifdef __SMP__
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)

#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

static struct srmmu_stats {
	int invall;
	int invpg;
	int invrnge;
	int invmm;
} module_stats;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;

/* Don't change this without changing access to this
 * in arch/sparc/mm/viking.S
 */
static struct srmmu_trans {
	unsigned long vbase;
	unsigned long pbase;
	unsigned long size;
} srmmu_map[SPARC_PHYS_BANKS];

#define SRMMU_HASHSZ	256

/* Not static, viking.S uses it. */
unsigned long srmmu_v2p_hash[SRMMU_HASHSZ];
static unsigned long srmmu_p2v_hash[SRMMU_HASHSZ];

#define srmmu_ahashfn(addr)	((addr) >> 24)

int viking_mxcc_present = 0;
/* Physical memory can be _very_ non-contiguous on the sun4m, especially
 * the SS10/20 class machines and with the latest openprom revisions.
 * So we have to do a quick lookup.
 * We use the same for SS1000/SC2000 as a fall back, when phys memory is
 * non-contiguous.
 */
static inline unsigned long srmmu_v2p(unsigned long vaddr)
{
	unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];

	return (vaddr + off);
}

static inline unsigned long srmmu_p2v(unsigned long paddr)
{
	unsigned long off = srmmu_p2v_hash[srmmu_ahashfn(paddr)];

	if (off != 0xffffffffUL)
		return (paddr - off);
	else
		return 0xffffffffUL;
}
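/* Illustrative sketch (not part of the original file, compiled out):
 * how the one-byte hash resolves a translation.  srmmu_ahashfn() takes
 * the top byte of the address, one slot per 16MB region, and each slot
 * stores the pbase - vbase delta of the bank covering that region (see
 * the setup loop in map_kernel() below).  Assumes a hypothetical bank
 * with vbase == KERNBASE and pbase == 0.
 */
#if 0
static unsigned long srmmu_v2p_example(void)
{
	unsigned long vaddr = KERNBASE + 0x123000;
	unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];

	/* With the assumed bank, off == 0 - KERNBASE, so this yields
	 * physical 0x123000.  Slots of srmmu_p2v_hash that cover no
	 * RAM keep 0xffffffffUL, which srmmu_p2v() treats as "no
	 * translation".
	 */
	return vaddr + off;
}
#endif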
/* Physical memory on most SS1000/SC2000 can be contiguous, so we handle that case
 * as a special case to make things faster.
 */
/* FIXME: gcc is stupid here and generates very very bad code in this
 * heavily used routine. So we help it a bit. */
static inline unsigned long srmmu_c_v2p(unsigned long vaddr)
{
#if KERNBASE != 0xf0000000
	if (vaddr >= KERNBASE) return vaddr - KERNBASE;
	return vaddr - BTFIXUP_SETHI(page_contig_offset);
#else
	register unsigned long kernbase;

	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
	return vaddr - ((vaddr >= kernbase) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
#endif
}

static inline unsigned long srmmu_c_p2v(unsigned long paddr)
{
#if KERNBASE != 0xf0000000
	if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE;
	return (paddr + BTFIXUP_SETHI(page_contig_offset));
#else
	register unsigned long kernbase;
	register unsigned long limit;

	__asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit));
	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));

	return paddr + ((paddr < limit) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
#endif
}

/* On boxes where there is no lots_of_ram, KERNBASE is mapped to PA<0> and highest
   PA is below 0x0d000000, we can optimize even more :) */
static inline unsigned long srmmu_s_v2p(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

static inline unsigned long srmmu_s_p2v(unsigned long paddr)
{
	return paddr + PAGE_OFFSET;
}

/* In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Functions really use this, not srmmu_swap directly. */
#define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
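/* Illustrative sketch (not part of the original file, compiled out):
 * why entries are installed with swap rather than a plain store.  The
 * MMU itself rewrites PTEs to set SRMMU_REF/SRMMU_DIRTY on access, so
 * a separate load-then-store sequence could overwrite a bit the
 * hardware set in between, while the single swap installs the new
 * value and hands back the old one atomically.
 */
#if 0
static void racy_set_entry_example(pte_t *ptep, unsigned long newentry)
{
	unsigned long old = pte_val(*ptep);	/* MMU may set DIRTY here... */

	*(unsigned long *)ptep = newentry;	/* ...and this store loses it. */
	(void) old;
}
#endif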
#ifdef PAGESKIP_DEBUG
#define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
#else
#define PGSKIP_DEBUG(from,to) do { } while (0)
#endif

__initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
{
	unsigned long bank_start, bank_end = 0;
	unsigned long addr;
	int i;

	/* First, mark all pages as invalid. */
	for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
		mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);

	/* Next, pg[0-3] is sun4c cruft, so we can free it... */
	mem_map[MAP_NR(pg0)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
	mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);

	start_mem = PAGE_ALIGN(start_mem);
	for(i = 0; srmmu_map[i].size; i++) {
		bank_start = srmmu_map[i].vbase;

		/* Making a one or two pages PG_skip holes
		 * is not necessary.  We add one more because
		 * we must set the PG_skip flag on the first
		 * two mem_map[] entries for the hole.  Go and
		 * see the mm/filemap.c:shrink_mmap() loop for
		 * details. -DaveM
		 */
		if (i && bank_start - bank_end > 3 * PAGE_SIZE) {
			mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
			mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
			mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(bank_start);
			PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
			if (bank_end > KERNBASE && bank_start < KERNBASE) {
				mem_map[0].flags |= (1<<PG_skip);
				mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
				mem_map[1].flags |= (1<<PG_skip);
				mem_map[1].next_hash = mem_map + MAP_NR(bank_start);
				PGSKIP_DEBUG(0, MAP_NR(bank_start));
			}
		}

		bank_end = bank_start + srmmu_map[i].size;
		while(bank_start < bank_end) {
			set_bit(MAP_NR(bank_start) >> 8, sparc_valid_addr_bitmap);
			if((bank_start >= KERNBASE) &&
			   (bank_start < start_mem)) {
				bank_start += PAGE_SIZE;
				continue;
			}
			mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
			bank_start += PAGE_SIZE;
		}

		if (bank_end == 0xfd000000)
			bank_end = PAGE_OFFSET;
	}

	if (bank_end < KERNBASE) {
		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
		mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(KERNBASE);
		PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
	} else if (MAP_NR(bank_end) < max_mapnr) {
		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
		mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
		if (mem_map[0].flags & (1 << PG_skip)) {
			mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map[0].next_hash;
			PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
		} else {
			mem_map[MAP_NR(bank_end)].next_hash = mem_map;
			mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map;
			PGSKIP_DEBUG(MAP_NR(bank_end), 0);
		}
	}
}
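/* Illustrative sketch (not part of the original file, compiled out):
 * how a mem_map walker is expected to use the PG_skip chains built
 * above.  Both the first and the second entry of a hole carry PG_skip
 * with next_hash pointing at the first page of the next bank, so a
 * scan can hop over the gap whether it lands on the first or the
 * second entry (cf. mm/filemap.c:shrink_mmap()).
 */
#if 0
static struct page *skip_hole_example(struct page *page)
{
	if ((page->flags & (1 << PG_skip)) && page->next_hash)
		page = page->next_hash;	/* jump to MAP_NR(bank_start) */
	return page;
}
#endif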
/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }

static unsigned long srmmu_c_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_c_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_c_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_c_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_c_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }

static unsigned long srmmu_s_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_s_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_s_pmd_page(pmd_t pmd)
{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_s_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }

static unsigned long srmmu_s_pte_page(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_s_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
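/* Illustrative sketch (not part of the original file, compiled out):
 * SRMMU table entries store the physical address of the next-level
 * table (or page) shifted right by 4, which is why the helpers above
 * mask and shift left by 4 before handing the result to p2v.  Entries
 * whose top nibble is non-zero (srmmu_device_memory()) address the
 * upper 4 bits of the 36-bit physical space, i.e. I/O, and have no
 * mem_map backing, hence the ~0 return.
 */
#if 0
static unsigned long ptd_pack_unpack_example(unsigned long table_paddr)
{
	/* table_paddr assumed suitably aligned, as real tables are */
	unsigned long ptd = (table_paddr >> 4) | SRMMU_ET_PTD;	/* as stored */

	return (ptd & SRMMU_PTD_PMASK) << 4;		/* == table_paddr again */
}
#endif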
static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }
static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline void srmmu_pte_clear(pte_t *ptep)      { set_pte(ptep, __pte(0)); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pmd_clear(pmd_t *pmdp)      { set_pte((pte_t *)pmdp, __pte(0)); }

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }
static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pgd_clear(pgd_t * pgdp)     { set_pte((pte_t *)pgdp, __pte(0)); }

static inline int srmmu_pte_write(pte_t pte)         { return pte_val(pte) & SRMMU_WRITE; }
static inline int srmmu_pte_dirty(pte_t pte)         { return pte_val(pte) & SRMMU_DIRTY; }
static inline int srmmu_pte_young(pte_t pte)         { return pte_val(pte) & SRMMU_REF; }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)   { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
static inline pte_t srmmu_pte_mkclean(pte_t pte)     { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
static inline pte_t srmmu_pte_mkold(pte_t pte)       { return __pte(pte_val(pte) & ~SRMMU_REF);}
static inline pte_t srmmu_pte_mkwrite(pte_t pte)     { return __pte(pte_val(pte) | SRMMU_WRITE);}
static inline pte_t srmmu_pte_mkdirty(pte_t pte)     { return __pte(pte_val(pte) | SRMMU_DIRTY);}
static inline pte_t srmmu_pte_mkyoung(pte_t pte)     { return __pte(pte_val(pte) | SRMMU_REF);}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_s_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((srmmu_s_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
}

static void srmmu_c_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4)));
}

static void srmmu_s_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pgdp) >> 4)));
}

static void srmmu_s_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pmdp) >> 4)));
}

static void srmmu_s_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) ptep) >> 4)));
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
}

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}

static inline pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}

static inline pmd_t *srmmu_s_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_s_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
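/* Illustrative sketch (not part of the original file, compiled out):
 * a full software walk using the three helpers above, mirroring what
 * the hardware table walk does: the pgd is indexed by the address bits
 * above SRMMU_PGDIR_SHIFT, the pmd by the middle bits, and the pte
 * table by the page index.
 */
#if 0
static pte_t *walk_example(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgdp = srmmu_pgd_offset(mm, address);
	pmd_t *pmdp = srmmu_pmd_offset(pgdp, address);

	return srmmu_pte_offset(pmdp, address);
}
#endif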
/* This must update the context table entry for this process. */
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}

static inline pte_t *srmmu_get_pte_fast(void)
{
	struct page *ret;

	spin_lock(&pte_spinlock);
	if ((ret = (struct page *)pte_quicklist) != NULL) {
		unsigned int mask = (unsigned int)ret->pprev_hash;
		unsigned int tmp, off;

		if (mask & 0xff)
			for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
		else
			for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
		(unsigned int)ret->pprev_hash = mask & ~tmp;
		if (!(mask & ~tmp))
			pte_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(page_address(ret) + off);
		pgtable_cache_size--;
	}
	spin_unlock(&pte_spinlock);
	return (pte_t *)ret;
}

static inline pte_t *srmmu_get_pte_slow(void)
{
	pte_t *ret;
	struct page *page;

	ret = (pte_t *)get_free_page(GFP_KERNEL);
	if (ret) {
		page = mem_map + MAP_NR(ret);
		flush_chunk((unsigned long)ret);
		(unsigned int)page->pprev_hash = 0xfffe;
		spin_lock(&pte_spinlock);
		(unsigned long *)page->next_hash = pte_quicklist;
		pte_quicklist = (unsigned long *)page;
		pgtable_cache_size += 15;
	}
	return ret;
}
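/* Illustrative sketch (not part of the original file, compiled out):
 * the layout behind the pte quicklist above.  A SRMMU pte table is
 * only 256 bytes, so one 4K page is carved into 16 chunks and
 * page->pprev_hash is reused as a 16-bit free mask (bit n set means
 * the chunk at offset n * 256 is free).  A fresh page enters the list
 * as 0xfffe: chunk 0 is handed out by the slow path itself and the
 * remaining 15 stay cached, which is also why pgtable_cache_size is
 * bumped by 15.
 */
#if 0
static unsigned int pte_mask_example(void)
{
	unsigned int mask = 0xfffe;		/* fresh page, chunk 0 in use */
	unsigned int tmp = 0x002, off = 256;	/* first free bit the fast path finds */

	mask &= ~tmp;				/* chunk at offset 256 now taken */
	(void) off;
	return mask;				/* 0xfffc */
}
#endif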
static inline pgd_t *srmmu_get_pgd_fast(void)
{
	struct page *ret;

	spin_lock(&pgd_spinlock);
	if ((ret = (struct page *)pgd_quicklist) != NULL) {
		unsigned int mask = (unsigned int)ret->pprev_hash;
		unsigned int tmp, off;

		for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024);
		(unsigned int)ret->pprev_hash = mask & ~tmp;
		if (!(mask & ~tmp))
			pgd_quicklist = (unsigned long *)ret->next_hash;
		ret = (struct page *)(page_address(ret) + off);
		pgd_cache_size--;
	}
	spin_unlock(&pgd_spinlock);
	return (pgd_t *)ret;
}

static inline pgd_t *srmmu_get_pgd_slow(void)
{
	pgd_t *ret;
	struct page *page;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (ret) {
		pgd_t *init = pgd_offset(&init_mm, 0);
		memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		page = mem_map + MAP_NR(ret);
		flush_chunk((unsigned long)ret);
		(unsigned int)page->pprev_hash = 0xe;
		spin_lock(&pgd_spinlock);
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
		pgd_cache_size += 3;
		spin_unlock(&pgd_spinlock);
	}
	return ret;
}
static void srmmu_free_pte_slow(pte_t *pte)
{
}

static void srmmu_free_pgd_slow(pgd_t *pgd)
{
}

static inline void srmmu_pte_free(pte_t *pte)
{
	struct page *page = mem_map + MAP_NR(pte);

	spin_lock(&pte_spinlock);
	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pte_quicklist;
		pte_quicklist = (unsigned long *)page;
	}
	(unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15));
	pgtable_cache_size++;
	spin_unlock(&pte_spinlock);
}

static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
	if(srmmu_pmd_none(*pmd)) {
		pte_t *page = srmmu_get_pte_fast();

		if (page) {
			pmd_set(pmd, page);
			return page + address;
		}
		page = srmmu_get_pte_slow();
		if(srmmu_pmd_none(*pmd)) {
			if(page) {
				spin_unlock(&pte_spinlock);
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		if (page) {
			(unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
			pgtable_cache_size++;
			spin_unlock(&pte_spinlock);
		}
	}
	if(srmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return ((pte_t *) pmd_page(*pmd)) + address;
}
/* Real three-level page tables on SRMMU. */
static void srmmu_pmd_free(pmd_t * pmd)
{
	return srmmu_pte_free((pte_t *)pmd);
}

static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
	if(srmmu_pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *)srmmu_get_pte_fast();

		if (page) {
			pgd_set(pgd, page);
			return page + address;
		}
		page = (pmd_t *)srmmu_get_pte_slow();
		if(srmmu_pgd_none(*pgd)) {
			if(page) {
				spin_unlock(&pte_spinlock);
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
			return NULL;
		}
		if (page) {
			(unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
			pgtable_cache_size++;
			spin_unlock(&pte_spinlock);
		}
	}
	if(srmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
static void srmmu_pgd_free(pgd_t *pgd)
{
	struct page *page = mem_map + MAP_NR(pgd);

	spin_lock(&pgd_spinlock);
	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
	}
	(unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3));
	pgd_cache_size++;
	spin_unlock(&pgd_spinlock);
}

static pgd_t *srmmu_pgd_alloc(void)
{
	pgd_t *ret;

	ret = srmmu_get_pgd_fast();
	if (ret) return ret;
	return srmmu_get_pgd_slow();
}
static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	struct page *page;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
	spin_lock(&pgd_spinlock);
	address >>= SRMMU_PGDIR_SHIFT;
	for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
		pgd_t *pgd = (pgd_t *)page_address(page);
		unsigned int mask = (unsigned int)page->pprev_hash;

		if (mask & 1)
			pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 2)
			pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 4)
			pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask & 8)
			pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry;
		if (mask)
			flush_chunk((unsigned long)pgd);
	}
	spin_unlock(&pgd_spinlock);
}

static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
{
	srmmu_set_entry(ptep, pte_val(pteval));
}
static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line, page;

	srmmu_set_entry(ptep, pte_val(pteval));
	page = ((unsigned long)ptep) & PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}
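/* Illustrative sketch (not part of the original file, compiled out):
 * the arithmetic behind the flush loop above.  Each "sta %g0, [addr]
 * ASI_M_FLUSH_PAGE" flushes one 0x20-byte cache line; eight of them
 * per asm block cover 0x100 bytes, and the loop walks the whole page
 * that holds the pte so no stale cached copy of the table survives.
 */
#if 0
static int cypress_flush_stride_example(void)
{
	/* 8 stores x 0x20 bytes == 0x100 bytes per iteration,
	 * PAGE_SIZE / 0x100 == 16 iterations per page.
	 */
	return PAGE_SIZE / 0x100;
}
#endif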
static void srmmu_set_pte_nocache_viking(pte_t *ptep, pte_t pteval)
{
	unsigned long vaddr;
	int set;
	int i;

	set = ((unsigned long)ptep >> 5) & 0x7f;
	vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
	srmmu_set_entry(ptep, pte_val(pteval));
	for (i = 0; i < 8; i++) {
		__asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
		vaddr += PAGE_SIZE;
	}
}

static void srmmu_quick_kernel_fault(unsigned long address)
{
#ifdef __SMP__
	printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
	       smp_processor_id(), address);
	while (1) ;
#else
	printk("Kernel faults at addr=0x%08lx\n", address);
	printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
	die_if_kernel("SRMMU bolixed...", current->tss.kregs);
#endif
}
static inline void alloc_context(struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == current->mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}
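/* Illustrative sketch (not part of the original file, compiled out):
 * the recycling policy above.  A free context number is taken from
 * ctx_free when one exists; otherwise the oldest ctx_used entry (never
 * the one owned by current->mm) is stolen, its cache/TLB state is
 * flushed, its owner drops back to NO_CONTEXT and will fault a context
 * in again on its next switch.  Once a number is picked, this is what
 * makes it take effect (cf. srmmu_switch_to_context() below):
 */
#if 0
static void context_use_example(struct mm_struct *mm)
{
	alloc_context(mm);
	ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	srmmu_set_context(mm->context);
}
#endif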
static void srmmu_switch_to_context(struct task_struct *tsk)
{
	if(tsk->mm->context == NO_CONTEXT) {
		alloc_context(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
	}
	srmmu_set_context(tsk->mm->context);
}

static void srmmu_init_new_context(struct mm_struct *mm)
{
	alloc_context(mm);

	flush_cache_mm(mm);
	ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	flush_tlb_mm(mm);

	if(mm == current->mm)
		srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	if(rdonly)
		tmp |= SRMMU_PRIV_RDONLY;
	else
		tmp |= SRMMU_PRIV;
	flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
	flush_tlb_all();
}
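/* Illustrative sketch (not part of the original file, compiled out):
 * what a srmmu_mapioaddr() call builds.  For a hypothetical device
 * register page at physical 0x10002000 in I/O space 0xf (bus_type),
 * mapped read-write, the pte value is
 *
 *	(0x10002000 >> 4) | (0xf << 28) | SRMMU_ET_PTE | SRMMU_PRIV
 *
 * i.e. bits 31:12 of the page address land in entry bits 27:8 and the
 * bus_type supplies physical address bits 35:32 in entry bits 31:28.
 */
#if 0
static void mapioaddr_example(unsigned long virt_addr)
{
	srmmu_mapioaddr(0x10002000, virt_addr, 0xf, 0);
}
#endif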
void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	set_pte(ptep, mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
	flush_tlb_all();
}

/* This is used in many routines below. */
#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))

/* On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code as we did
 * need on the sun4c.
 */
struct task_struct *srmmu_alloc_task_struct(void)
{
	return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1);
}

static void srmmu_free_task_struct(struct task_struct *tsk)
{
	free_pages((unsigned long)tsk, 1);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_chunk(unsigned long chunk);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

/* Workaround, until we find what's going on with Swift. When low on memory, it sometimes
 * loops in fault/handle_mm_fault incl. flush_tlb_page to find out it is already in page tables/
 * fault again on the same instruction. I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	static unsigned long last;

	if (last == address) viking_hwprobe(address);
	last = address;
}
/* Swift flushes.  It has the recommended SRMMU specification flushing
 * facilities, so we can do things in a more fine grained fashion than we
 * could on the tsunami.  Let's watch out for HARDWARE BUGS...
 */

static void swift_flush_cache_all(void)
{
	flush_user_windows();
	swift_idflash_clear();
}

static void swift_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	swift_idflash_clear();
	FLUSH_END
}

static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	swift_idflash_clear();
	FLUSH_END
}

static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if(vma->vm_flags & VM_EXEC)
		swift_flush_icache();
	swift_flush_dcache();
	FLUSH_END
}

/* Not copy-back on swift. */
static void swift_flush_page_to_ram(unsigned long page)
{
}

/* But not IO coherent either. */
static void swift_flush_page_for_dma(unsigned long page)
{
	swift_flush_dcache();
}

/* Again, Swift is non-snooping split I/D cache'd just like tsunami,
 * so have to punt the icache for on-stack signal insns.  Only the
 * icache need be flushed since the dcache is write-through.
 */
static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	swift_flush_icache();
}

static void swift_flush_chunk(unsigned long chunk)
{
}

static void swift_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
	module_stats.invall++;
}

static void swift_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invmm++;
	FLUSH_END
}

static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	module_stats.invrnge++;
	FLUSH_END
}

static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	module_stats.invpg++;
	FLUSH_END
}
/* The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_PMD_SIZE;
	}
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	__save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	__restore_flags(flags);
	FLUSH_END
}
/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

static void cypress_flush_chunk(unsigned long chunk)
{
	cypress_flush_page_to_ram(chunk);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}
static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
	module_stats.invall++;
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__("
	lda	[%0] %3, %%g5
	sta	%2, [%0] %3
	sta	%%g0, [%1] %4
	sta	%%g5, [%0] %3"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	module_stats.invmm++;
	FLUSH_END
}

static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__("
	lda	[%0] %5, %%g5
	sta	%1, [%0] %5
1:	subcc	%3, %4, %3
	bne	1b
	 sta	%%g0, [%2 + %3] %6
	sta	%%g5, [%0] %5"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	module_stats.invrnge++;
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__("
	lda	[%0] %3, %%g5
	sta	%1, [%0] %3
	sta	%%g0, [%2] %4
	sta	%%g5, [%0] %3"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	module_stats.invpg++;
	FLUSH_END
}
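/* Illustrative sketch (not part of the original file, compiled out):
 * the magic constants in the Cypress TLB flushes above are the SRMMU
 * flush/probe type field (virtual address bits 11:8 of the
 * ASI_M_FLUSH_PROBE access): 0x000 selects a page flush, 0x100 a
 * segment, 0x200 a region, 0x300 a whole context, 0x400 the entire
 * TLB.  So flush_tlb_page stores to (page & PAGE_MASK), flush_tlb_range
 * walks region-sized steps with (start | 0x200), and flush_tlb_mm uses
 * 0x300 after temporarily switching the context register to the
 * victim's context.
 */
#if 0
static void srmmu_flush_tlb_region_example(unsigned long addr)
{
	/* Flush the 16MB region containing addr, as the range loop does. */
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" ((addr & SRMMU_PGDIR_MASK) | 0x200),
			     "i" (ASI_M_FLUSH_PROBE));
}
#endif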
/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_chunk(unsigned long chunk);
extern void viking_c_flush_chunk(unsigned long chunk);
extern void viking_s_flush_chunk(unsigned long chunk);
extern void viking_mxcc_flush_chunk(unsigned long chunk);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_chunk(unsigned long chunk);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
{
	unsigned long page = ((unsigned long)ptep) & PAGE_MASK;

	srmmu_set_entry(ptep, pte_val(pteval));
	hypersparc_flush_page_to_ram(page);
}

static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4))));
	hypersparc_flush_page_to_ram((unsigned long)ctxp);
	hyper_flush_whole_icache();
}

static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;

	if(pgdp != swapper_pg_dir)
		hypersparc_flush_page_to_ram(page);

	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}

static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if(pgdp != swapper_pg_dir)
		flush_chunk((unsigned long)pgdp);
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}
static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
	unsigned long line;

	if(pgdp == swapper_pg_dir)
		goto skip_flush;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
skip_flush:
	if(tsk->mm->context != NO_CONTEXT &&
	   tsk->mm->pgd != pgdp) {
		flush_cache_mm(tsk->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(tsk->mm);
	}
}

static void hypersparc_switch_to_context(struct task_struct *tsk)
{
	if(tsk->mm->context == NO_CONTEXT) {
		ctxd_t *ctxp;

		alloc_context(tsk->mm);
		ctxp = &srmmu_context_table[tsk->mm->context];
		srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4))));
		hypersparc_flush_page_to_ram((unsigned long)ctxp);
	}
	hyper_flush_whole_icache();
	srmmu_set_context(tsk->mm->context);
}

static void hypersparc_init_new_context(struct mm_struct *mm)
{
	ctxd_t *ctxp;

	alloc_context(mm);

	ctxp = &srmmu_context_table[mm->context];
	srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
	hypersparc_flush_page_to_ram((unsigned long)ctxp);

	if(mm == current->mm) {
		hyper_flush_whole_icache();
		srmmu_set_context(mm->context);
	}
}
static unsigned long mempool;

/* NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static unsigned long kbpage;

/* Some dirty hacks to abstract away the painful boot up init. */
static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
{
	return ((vaddr - KERNBASE) + kbpage);
}

static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4))));
}

static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	set_pte((pte_t *)pmdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4))));
}

static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
{
	return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}

static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
{
	return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}

static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
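/* Illustrative sketch (not part of the original file, compiled out):
 * during early setup the v2p/p2v hashes are not built yet, so the
 * _early_ helpers above translate with the single assumption that the
 * kernel image is one contiguous chunk: virtual KERNBASE + x lives at
 * physical kbpage + x, where kbpage records where the boot loader
 * physically placed the kernel (initialized later during init).
 */
#if 0
static unsigned long early_paddr_example(void)
{
	/* e.g. with kbpage == 0 this is physical 0x00200000 */
	return srmmu_early_paddr(KERNBASE + 0x200000);
}
#endif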
static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = srmmu_pgd_offset(init_task.mm, start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
	}
}
/* This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
__initfunc(void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end))
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = srmmu_pgd_offset(init_task.mm, start);
		if(what == 2) {
			*pgdp = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(what == 1) {
			*pmdp = __pmd(prompte);
			start += SRMMU_PMD_SIZE;
			continue;
		}
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		ptep = srmmu_early_pte_offset(pmdp, start);
		*ptep = __pte(prompte);
		start += PAGE_SIZE;
	}
}
#ifdef DEBUG_MAP_KERNEL
#define MKTRACE(foo) prom_printf foo
#else
#define MKTRACE(foo)
#endif

static int lots_of_ram __initdata = 0;
static int srmmu_low_pa __initdata = 0;
static unsigned long end_of_phys_memory __initdata = 0;

__initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *end_mem_p))
{
	unsigned int sum = 0;
	unsigned long last = 0xff000000;
	long first, cur;
	unsigned long pa;
	unsigned long total = 0;
	int i;

	pa = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
	pa = (pa & SRMMU_PTE_PMASK) << 4;
	if (!sp_banks[0].base_addr && pa == PAGE_SIZE) {
		for(i = 0; sp_banks[i].num_bytes != 0; i++) {
			if (sp_banks[i].base_addr + sp_banks[i].num_bytes > 0x0d000000)
				break;
		}
		if (!sp_banks[i].num_bytes) {
			srmmu_low_pa = 1;
			end_of_phys_memory = SRMMU_PGDIR_ALIGN(sp_banks[i-1].base_addr + sp_banks[i-1].num_bytes);
			*end_mem_p = KERNBASE + end_of_phys_memory;
			if (sp_banks[0].num_bytes >= (6 * 1024 * 1024) || end_of_phys_memory <= 0x06000000) {
				/* Make sure there will be enough memory for the whole mem_map (even if sparse) */
				return;
			}
		}
	}
	for(i = 0; sp_banks[i].num_bytes != 0; i++) {
		pa = sp_banks[i].base_addr;
		first = (pa & (~SRMMU_PGDIR_MASK));
		cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
		if (cur < 0) cur = 0;
		if (!first || last != (pa & SRMMU_PGDIR_MASK))
			total += SRMMU_PGDIR_SIZE;
		sum += sp_banks[i].num_bytes;
		if (memory_size) {
			if (sum > memory_size) {
				sp_banks[i].num_bytes -=
					(sum - memory_size);
				cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE);
				if (cur < 0) cur = 0;
				total += SRMMU_PGDIR_ALIGN(cur);
				sum = memory_size;
				sp_banks[++i].base_addr = 0xdeadbeef;
				sp_banks[i].num_bytes = 0;
				break;
			}
		}
		total += SRMMU_PGDIR_ALIGN(cur);
		last = (sp_banks[i].base_addr + sp_banks[i].num_bytes - 1) & SRMMU_PGDIR_MASK;
	}
	if (total <= 0x0d000000)
		*end_mem_p = KERNBASE + total;
	else {
		*end_mem_p = 0xfd000000;
		lots_of_ram = 1;
	}
	end_of_phys_memory = total;
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
__initfunc(static void do_large_mapping(unsigned long vaddr, unsigned long phys_base))
{
	pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
	unsigned long big_pte;

	MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
	big_pte = KERNEL_PTE(phys_base >> 4);
	*pgdp = __pgd(big_pte);
}
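/* Illustrative worked example (not part of the original file): what one
 * do_large_mapping() call produces.  A pgd-level PTE covers a full
 * 16MB (SRMMU_PGDIR_SIZE) region, so for a hypothetical
 * do_large_mapping(0xf0000000, 0x00000000) the entry is
 *
 *	KERNEL_PTE(0x00000000 >> 4)
 *		== SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID
 *
 * installed directly in the pgd slot of init_task.mm for 0xf0000000,
 * with no pmd or pte tables behind it.
 */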
/* Look in the sp_bank for the given physical page, return the
 * index number the entry was found in, or -1 for not found.
 */
static inline int find_in_spbanks(unsigned long phys_page)
{
	int entry;

	for(entry = 0; sp_banks[entry].num_bytes; entry++) {
		unsigned long start = sp_banks[entry].base_addr;
		unsigned long end = start + sp_banks[entry].num_bytes;

		if((start <= phys_page) && (phys_page < end))
			return entry;
	}
	return -1;
}

/* Find an spbank entry not mapped as of yet, TAKEN_VECTOR is an
 * array of char's, each member indicating if that spbank is mapped
 * yet or not.
 */
__initfunc(static int find_free_spbank(char *taken_vector))
{
	int entry;

	for(entry = 0; sp_banks[entry].num_bytes; entry++)
		if(!taken_vector[entry])
			break;
	return entry;
}

static unsigned long map_spbank_last_pa __initdata = 0xff000000;

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
 */
__initfunc(static unsigned long map_spbank(unsigned long vbase, int sp_entry))
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	static int srmmu_bank = 0;

	MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry, vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
	MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry, pstart, vstart, vend));
	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	srmmu_map[srmmu_bank].vbase = vbase;
	srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
	srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
	srmmu_bank++;
	map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE;
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}
1699 /* Assumptions: The bank given to the kernel from the prom/bootloader
1700 * is part of a full bank which is at least 4MB in size and begins at
1701 * 0xf0000000 (ie. KERNBASE).
1703 static inline void map_kernel(void)
1705 unsigned long raw_pte, physpage;
1706 unsigned long vaddr, low_base;
1707 char etaken[SPARC_PHYS_BANKS];
1708 int entry;
1710 /* Step 1: Clear out sp_banks taken map. */
1711 MKTRACE(("map_kernel: clearing etaken vector... "));
1712 for(entry = 0; entry < SPARC_PHYS_BANKS; entry++)
1713 etaken[entry] = 0;
1715 low_base = KERNBASE;
1717 /* Step 2: Fill in KERNBASE base pgd. Lots of sanity checking here. */
1718 raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1719 if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
1720 memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
1721 physpage = (raw_pte & SRMMU_PTE_PMASK) << 4;
1722 physpage -= PAGE_SIZE;
1723 if(physpage & ~(SRMMU_PGDIR_MASK))
1724 memprobe_error("Wheee, kernel not mapped on 16MB physical boundry.\n");
1725 entry = find_in_spbanks(physpage);
1726 if(entry == -1 || (sp_banks[entry].base_addr != physpage))
1727 memprobe_error("Kernel mapped in non-existant memory.\n");
1728 MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
1729 if (sp_banks[entry].num_bytes > 0x0d000000) {
1730 unsigned long orig_base = sp_banks[entry].base_addr;
1731 unsigned long orig_len = sp_banks[entry].num_bytes;
1732 unsigned long can_map = 0x0d000000;
1734 /* Map a partial bank in this case, adjust the base
1735 * and the length, but don't mark it used.
1737 sp_banks[entry].num_bytes = can_map;
1738 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
1739 vaddr = map_spbank(KERNBASE, entry);
1740 MKTRACE(("vaddr now %08lx ", vaddr));
1741 sp_banks[entry].base_addr = orig_base + can_map;
1742 sp_banks[entry].num_bytes = orig_len - can_map;
1743 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
1744 MKTRACE(("map_kernel: skipping first loop\n"));
1745 goto loop_skip;
1747 vaddr = map_spbank(KERNBASE, entry);
1748 etaken[entry] = 1;
1750 /* Step 3: Map what we can above KERNBASE. */
1751 MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
1752 for(;;) {
1753 unsigned long bank_size;
1755 MKTRACE(("map_kernel: ffsp()"));
1756 entry = find_free_spbank(&etaken[0]);
1757 bank_size = sp_banks[entry].num_bytes;
1758 MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
1759 if(!bank_size)
1760 break;
1761 if (srmmu_low_pa)
1762 vaddr = KERNBASE + sp_banks[entry].base_addr;
1763 else if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
1764 if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
1765 vaddr -= SRMMU_PGDIR_SIZE;
1766 vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
1768 if ((vaddr + bank_size - KERNBASE) > 0x0d000000) {
1769 unsigned long orig_base = sp_banks[entry].base_addr;
1770 unsigned long orig_len = sp_banks[entry].num_bytes;
1771 unsigned long can_map = (0xfd000000 - vaddr);
1773 /* Map a partial bank in this case, adjust the base
1774 * and the length, but don't mark it used.
1776 sp_banks[entry].num_bytes = can_map;
1777 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
1778 vaddr = map_spbank(vaddr, entry);
1779 MKTRACE(("vaddr now %08lx ", vaddr));
1780 sp_banks[entry].base_addr = orig_base + can_map;
1781 sp_banks[entry].num_bytes = orig_len - can_map;
1782 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
1783 break;
1786 /* Ok, we can map this one, do it. */
1787 MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
1788 vaddr = map_spbank(vaddr, entry);
1789 etaken[entry] = 1;
1790 MKTRACE(("vaddr now %08lx\n", vaddr));
1792 MKTRACE(("\n"));
1793 /* If not lots_of_ram, assume we did indeed map it all above. */
1794 loop_skip:
1795 if(!lots_of_ram)
1796 goto check_and_return;
1798 /* Step 4: Map the rest (if any) right below KERNBASE. */
1799 MKTRACE(("map_kernel: doing low mappings... "));
1800 low_base = (KERNBASE - end_of_phys_memory + 0x0d000000);
1801 MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory, low_base));
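/* Whatever did not fit above KERNBASE is mapped immediately below it:
* low_base is chosen so that the remaining physical memory
* (end_of_phys_memory - 0x0d000000 bytes of it) ends exactly at KERNBASE.
*/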
1803 /* Ok, now map 'em. */
1804 MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
1805 srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
1806 vaddr = low_base;
1807 map_spbank_last_pa = 0xff000000;
1808 MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
1809 for(;;) {
1810 unsigned long bank_size;
1812 entry = find_free_spbank(&etaken[0]);
1813 bank_size = sp_banks[entry].num_bytes;
1814 MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
1815 if(!bank_size)
1816 break;
1817 if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) {
1818 if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK))
1819 vaddr -= SRMMU_PGDIR_SIZE;
1820 vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK));
1822 if((vaddr + bank_size) > KERNBASE)
1823 memprobe_error("Wheee, kernel low mapping overflow.\n");
1824 MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
1825 vaddr = map_spbank(vaddr, entry);
1826 etaken[entry] = 1;
1827 MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr, end_of_phys_memory));
1829 MKTRACE(("\n"));
1831 check_and_return:
1832 /* Step 5: Sanity check, make sure we did it all. */
1833 MKTRACE(("check_and_return: "));
1834 for(entry = 0; sp_banks[entry].num_bytes; entry++) {
1835 MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
1836 if(!etaken[entry]) {
1837 MKTRACE(("oops\n"));
1838 memprobe_error("Some bank did not get mapped.\n");
1841 MKTRACE(("success\n"));
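/* All banks are now mapped.  The lowest kernel virtual address in use
* becomes page_offset/stack_top, and since these values are only known
* after probing physical memory they are patched into the rest of the
* kernel through the btfixup entries below.
*/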
1842 init_task.mm->mmap->vm_start = page_offset = low_base;
1843 stack_top = page_offset - PAGE_SIZE;
1844 BTFIXUPSET_SETHI(page_offset, low_base);
1845 BTFIXUPSET_SETHI(stack_top, page_offset - PAGE_SIZE);
1846 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, page_offset / SRMMU_PGDIR_SIZE);
1848 #if 1
1849 for(entry = 0; srmmu_map[entry].size; entry++) {
1850 printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry,
1851 srmmu_map[entry].vbase,
1852 srmmu_map[entry].vbase + srmmu_map[entry].size,
1853 srmmu_map[entry].size,
1854 srmmu_map[entry].pbase);
1856 #endif
1858 /* Now setup the p2v/v2p hash tables. */
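/* srmmu_ahashfn() hashes on the top 8 address bits, so each hash entry
* covers one 16MB region.  For every mapped region both tables store
* the (pbase - vbase) delta, which turns a V<->P translation into one
* table lookup plus an add; regions with no memory behind them keep
* the default values set up by the first two loops.
*/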
1859 for(entry = 0; entry < SRMMU_HASHSZ; entry++)
1860 srmmu_v2p_hash[entry] = ((0xff - entry) << 24);
1861 for(entry = 0; entry < SRMMU_HASHSZ; entry++)
1862 srmmu_p2v_hash[entry] = 0xffffffffUL;
1863 for(entry = 0; srmmu_map[entry].size; entry++) {
1864 unsigned long addr;
1866 for(addr = srmmu_map[entry].vbase;
1867 addr < (srmmu_map[entry].vbase + srmmu_map[entry].size);
1868 addr += (1 << 24))
1869 srmmu_v2p_hash[srmmu_ahashfn(addr)] =
1870 srmmu_map[entry].pbase - srmmu_map[entry].vbase;
1871 for(addr = srmmu_map[entry].pbase;
1872 addr < (srmmu_map[entry].pbase + srmmu_map[entry].size);
1873 addr += (1 << 24))
1874 srmmu_p2v_hash[srmmu_ahashfn(addr)] =
1875 srmmu_map[entry].pbase - srmmu_map[entry].vbase;
1878 BTFIXUPSET_SETHI(page_contig_offset, page_offset - (0xfd000000 - KERNBASE));
1879 if (srmmu_low_pa)
1880 phys_mem_contig = 0;
1881 else {
1882 phys_mem_contig = 1;
1883 for(entry = 0; srmmu_map[entry].size; entry++)
1884 if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) {
1885 phys_mem_contig = 0;
1886 break;
1889 if (phys_mem_contig) {
1890 printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n");
1891 BTFIXUPSET_CALL(pte_page, srmmu_c_pte_page, BTFIXUPCALL_NORM);
1892 BTFIXUPSET_CALL(pmd_page, srmmu_c_pmd_page, BTFIXUPCALL_NORM);
1893 BTFIXUPSET_CALL(pgd_page, srmmu_c_pgd_page, BTFIXUPCALL_NORM);
1894 BTFIXUPSET_CALL(mk_pte, srmmu_c_mk_pte, BTFIXUPCALL_NORM);
1895 BTFIXUPSET_CALL(pte_offset, srmmu_c_pte_offset, BTFIXUPCALL_NORM);
1896 BTFIXUPSET_CALL(pmd_offset, srmmu_c_pmd_offset, BTFIXUPCALL_NORM);
1897 if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
1898 BTFIXUPSET_CALL(ctxd_set, srmmu_c_ctxd_set, BTFIXUPCALL_NORM);
1899 BTFIXUPSET_CALL(pgd_set, srmmu_c_pgd_set, BTFIXUPCALL_NORM);
1900 BTFIXUPSET_CALL(pmd_set, srmmu_c_pmd_set, BTFIXUPCALL_NORM);
1901 BTFIXUPSET_CALL(mmu_v2p, srmmu_c_v2p, BTFIXUPCALL_NORM);
1902 BTFIXUPSET_CALL(mmu_p2v, srmmu_c_p2v, BTFIXUPCALL_NORM);
1903 if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
1904 BTFIXUPSET_CALL(flush_chunk, viking_c_flush_chunk, BTFIXUPCALL_NORM);
1905 } else if (srmmu_low_pa) {
1906 printk ("SRMMU: Compact physical memory. Using straightforward VA<->PA translations.\n");
1907 BTFIXUPSET_CALL(pte_page, srmmu_s_pte_page, BTFIXUPCALL_NORM);
1908 BTFIXUPSET_CALL(pmd_page, srmmu_s_pmd_page, BTFIXUPCALL_NORM);
1909 BTFIXUPSET_CALL(pgd_page, srmmu_s_pgd_page, BTFIXUPCALL_NORM);
1910 BTFIXUPSET_CALL(mk_pte, srmmu_s_mk_pte, BTFIXUPCALL_NORM);
1911 BTFIXUPSET_CALL(pte_offset, srmmu_s_pte_offset, BTFIXUPCALL_NORM);
1912 BTFIXUPSET_CALL(pmd_offset, srmmu_s_pmd_offset, BTFIXUPCALL_NORM);
1913 if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set)
1914 BTFIXUPSET_CALL(ctxd_set, srmmu_s_ctxd_set, BTFIXUPCALL_NORM);
1915 BTFIXUPSET_CALL(pgd_set, srmmu_s_pgd_set, BTFIXUPCALL_NORM);
1916 BTFIXUPSET_CALL(pmd_set, srmmu_s_pmd_set, BTFIXUPCALL_NORM);
1917 BTFIXUPSET_CALL(mmu_v2p, srmmu_s_v2p, BTFIXUPCALL_NORM);
1918 BTFIXUPSET_CALL(mmu_p2v, srmmu_s_p2v, BTFIXUPCALL_NORM);
1919 if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk)
1920 BTFIXUPSET_CALL(flush_chunk, viking_s_flush_chunk, BTFIXUPCALL_NORM);
1922 btfixup();
1924 return; /* SUCCESS! */
1927 /* Paging initialization on the Sparc Reference MMU. */
1928 extern unsigned long free_area_init(unsigned long, unsigned long);
1929 extern unsigned long sparc_context_init(unsigned long, int);
1931 extern int physmem_mapped_contig;
1932 extern int linux_num_cpus;
1934 void (*poke_srmmu)(void) __initdata = NULL;
1936 __initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
1938 unsigned long ptables_start;
1939 int i, cpunode;
1940 char node_str[128];
1942 sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
1943 physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
1945 if (sparc_cpu_model == sun4d)
1946 num_contexts = 65536; /* We know it is Viking */
1947 else {
1948 /* Find the number of contexts on the srmmu. */
1949 cpunode = prom_getchild(prom_root_node);
1950 num_contexts = 0;
1951 while((cpunode = prom_getsibling(cpunode)) != 0) {
1952 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1953 if(!strcmp(node_str, "cpu")) {
1954 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1955 break;
1960 if(!num_contexts) {
1961 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1962 prom_halt();
1965 ptables_start = mempool = PAGE_ALIGN(start_mem);
1966 memset(swapper_pg_dir, 0, PAGE_SIZE);
1967 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1968 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1969 kbpage -= PAGE_SIZE;
1971 srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1972 #if CONFIG_SUN_IO
1973 srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
1974 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1975 #endif
1977 mempool = PAGE_ALIGN(mempool);
1978 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1979 map_kernel();
1980 srmmu_context_table = sparc_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1981 srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
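/* Point every context descriptor at swapper_pg_dir to begin with, so
* whatever context the hardware loads resolves through the kernel page
* tables until a real user pgd is installed for it.
*/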
1982 for(i = 0; i < num_contexts; i++)
1983 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1985 start_mem = PAGE_ALIGN(mempool);
1987 flush_cache_all();
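/* When the per-page DMA flush is viking_flush_page, push every page of
* the freshly built page tables and context table
* (ptables_start..start_mem) out of the cache before the MMU is pointed
* at them; Viking ptes are otherwise kept out of the cache
* (cf. srmmu_set_pte_nocache_viking in init_viking below).
*/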
1988 if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) {
1989 unsigned long start = ptables_start;
1990 unsigned long end = start_mem;
1992 while(start < end) {
1993 viking_flush_page(start);
1994 start += PAGE_SIZE;
1997 srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1998 flush_tlb_all();
1999 poke_srmmu();
2001 start_mem = sparc_context_init(start_mem, num_contexts);
2002 start_mem = free_area_init(start_mem, end_mem);
2004 return PAGE_ALIGN(start_mem);
2007 static int srmmu_mmu_info(char *buf)
2009 return sprintf(buf,
2010 "MMU type\t: %s\n"
2011 "invall\t\t: %d\n"
2012 "invmm\t\t: %d\n"
2013 "invrnge\t\t: %d\n"
2014 "invpg\t\t: %d\n"
2015 "contexts\t: %d\n"
2016 , srmmu_name,
2017 module_stats.invall,
2018 module_stats.invmm,
2019 module_stats.invrnge,
2020 module_stats.invpg,
2021 num_contexts
2025 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
2029 static void srmmu_destroy_context(struct mm_struct *mm)
2031 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2032 /* XXX This could be drastically improved.
2033 * XXX We are only called from __exit_mm and it just did
2034 * XXX cache/tlb mm flush and right after this will (re-)
2035 * XXX SET_PAGE_DIR to swapper_pg_dir. -DaveM
2036 */
2037 flush_cache_mm(mm);
2038 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
2039 flush_tlb_mm(mm);
2040 free_context(mm->context);
2041 mm->context = NO_CONTEXT;
2045 static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
2046 unsigned long address, pte_t pte)
2048 if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) {
2049 struct vm_area_struct *vmaring;
2050 struct file *file;
2051 struct inode *inode;
2052 unsigned long flags, offset, vaddr, start;
2053 int alias_found = 0;
2054 pgd_t *pgdp;
2055 pmd_t *pmdp;
2056 pte_t *ptep;
2058 __save_and_cli(flags);
2060 file = vma->vm_file;
2061 if (!file)
2062 goto done;
2063 inode = file->f_dentry->d_inode;
2064 offset = (address & PAGE_MASK) - vma->vm_start;
2065 vmaring = inode->i_mmap;
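/* Walk all other mappings of the same file offset.  If another user
* virtual address differs from ours in the cache index bits
* (vac_badbits), the two pages would alias in the virtually indexed
* cache, so every such pte is made uncacheable below.
*/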
2066 do {
2067 /* Do not mistake ourselves for another mapping. */
2068 if(vmaring == vma)
2069 continue;
2071 vaddr = vmaring->vm_start + offset;
2072 if ((vaddr ^ address) & vac_badbits) {
2073 alias_found++;
2074 start = vmaring->vm_start;
2075 while (start < vmaring->vm_end) {
2076 pgdp = srmmu_pgd_offset(vmaring->vm_mm, start);
2077 if(!pgdp) goto next;
2078 pmdp = srmmu_pmd_offset(pgdp, start);
2079 if(!pmdp) goto next;
2080 ptep = srmmu_pte_offset(pmdp, start);
2081 if(!ptep) goto next;
2083 if((pte_val(*ptep) & SRMMU_ET_MASK) == SRMMU_VALID) {
2084 #if 0
2085 printk("Fixing USER/USER alias [%ld:%08lx]\n",
2086 vmaring->vm_mm->context, start);
2087 #endif
2088 flush_cache_page(vmaring, start);
2089 set_pte(ptep, __pte((pte_val(*ptep) &
2090 ~SRMMU_CACHE)));
2091 flush_tlb_page(vmaring, start);
2093 next:
2094 start += PAGE_SIZE;
2097 } while ((vmaring = vmaring->vm_next_share) != NULL);
2099 if(alias_found && ((pte_val(pte) & SRMMU_CACHE) != 0)) {
2100 pgdp = srmmu_pgd_offset(vma->vm_mm, address);
2101 pmdp = srmmu_pmd_offset(pgdp, address);
2102 ptep = srmmu_pte_offset(pmdp, address);
2103 flush_cache_page(vma, address);
2104 set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
2105 flush_tlb_page(vma, address);
2107 done:
2108 __restore_flags(flags);
2112 static void hypersparc_destroy_context(struct mm_struct *mm)
2114 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2115 ctxd_t *ctxp;
2117 /* HyperSparc is copy-back, any data for this
2118 * process in a modified cache line is stale
2119 * and must be written back to main memory now
2120 * else we eat shit later big time.
2121 */
2122 flush_cache_mm(mm);
2124 ctxp = &srmmu_context_table[mm->context];
2125 srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) swapper_pg_dir) >> 4))));
2126 hypersparc_flush_page_to_ram((unsigned long)ctxp);
2128 flush_tlb_mm(mm);
2129 free_context(mm->context);
2130 mm->context = NO_CONTEXT;
2134 /* Init various srmmu chip types. */
2135 __initfunc(static void srmmu_is_bad(void))
2137 prom_printf("Could not determine SRMMU chip type.\n");
2138 prom_halt();
2141 __initfunc(static void init_vac_layout(void))
2143 int nd, cache_lines;
2144 char node_str[128];
2145 #ifdef __SMP__
2146 int cpu = 0;
2147 unsigned long max_size = 0;
2148 unsigned long min_line_size = 0x10000000;
2149 #endif
2151 nd = prom_getchild(prom_root_node);
2152 while((nd = prom_getsibling(nd)) != 0) {
2153 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
2154 if(!strcmp(node_str, "cpu")) {
2155 vac_line_size = prom_getint(nd, "cache-line-size");
2156 if (vac_line_size == -1) {
2157 prom_printf("can't determine cache-line-size, "
2158 "halting.\n");
2159 prom_halt();
2161 cache_lines = prom_getint(nd, "cache-nlines");
2162 if (cache_lines == -1) {
2163 prom_printf("can't determine cache-nlines, halting.\n");
2164 prom_halt();
2167 vac_cache_size = cache_lines * vac_line_size;
2168 vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
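/* vac_badbits are the virtual address bits above page granularity that
* still select a cache line; shared mappings whose addresses differ in
* these bits can alias in the VAC (see srmmu_vac_update_mmu_cache).
*/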
2169 #ifdef __SMP__
2170 if(vac_cache_size > max_size)
2171 max_size = vac_cache_size;
2172 if(vac_line_size < min_line_size)
2173 min_line_size = vac_line_size;
2174 cpu++;
2175 if(cpu == smp_num_cpus)
2176 break;
2177 #else
2178 break;
2179 #endif
2182 if(nd == 0) {
2183 prom_printf("No CPU nodes found, halting.\n");
2184 prom_halt();
2186 #ifdef __SMP__
2187 vac_cache_size = max_size;
2188 vac_line_size = min_line_size;
2189 vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
2190 #endif
2191 printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
2192 (int)vac_cache_size, (int)vac_line_size);
2195 __initfunc(static void poke_hypersparc(void))
2197 volatile unsigned long clear;
2198 unsigned long mreg = srmmu_get_mmureg();
2200 hyper_flush_unconditional_combined();
2202 mreg &= ~(HYPERSPARC_CWENABLE);
2203 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2204 mreg |= (HYPERSPARC_CMODE);
2206 srmmu_set_mmureg(mreg);
2208 #if 0 /* I think this is bad news... -DaveM */
2209 hyper_clear_all_tags();
2210 #endif
2212 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2213 hyper_flush_whole_icache();
2214 clear = srmmu_get_faddr();
2215 clear = srmmu_get_fstatus();
2218 __initfunc(static void init_hypersparc(void))
2220 srmmu_name = "ROSS HyperSparc";
2222 init_vac_layout();
2224 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM);
2225 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2226 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2227 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2228 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
2229 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
2230 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
2231 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
2233 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
2234 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
2235 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
2236 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
2238 BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
2239 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
2240 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
2242 BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2244 BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
2245 BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM);
2246 BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM);
2247 BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
2248 BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
2249 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM);
2250 poke_srmmu = poke_hypersparc;
2252 hypersparc_setup_blockops();
2255 __initfunc(static void poke_cypress(void))
2257 unsigned long mreg = srmmu_get_mmureg();
2258 unsigned long faddr, tagval;
2259 volatile unsigned long cypress_sucks;
2260 volatile unsigned long clear;
2262 clear = srmmu_get_faddr();
2263 clear = srmmu_get_fstatus();
2265 if (!(mreg & CYPRESS_CENABLE)) {
2266 for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
2267 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
2268 "sta %%g0, [%0] %2\n\t" : :
2269 "r" (faddr), "r" (0x40000),
2270 "i" (ASI_M_DATAC_TAG));
2272 } else {
2273 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
2274 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
2275 "=r" (tagval) :
2276 "r" (faddr), "r" (0x40000),
2277 "i" (ASI_M_DATAC_TAG));
2279 /* If modified and valid, kick it. */
2280 if((tagval & 0x60) == 0x60)
2281 cypress_sucks = *(unsigned long *)
2282 (0xf0020000 + faddr);
2286 /* And one more, for our good neighbor, Mr. Broken Cypress. */
2287 clear = srmmu_get_faddr();
2288 clear = srmmu_get_fstatus();
2290 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
2291 srmmu_set_mmureg(mreg);
2294 __initfunc(static void init_cypress_common(void))
2296 init_vac_layout();
2298 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM);
2299 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2300 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2301 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2302 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
2303 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
2304 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
2305 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
2307 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
2308 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
2309 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
2310 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
2312 BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2314 BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
2315 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
2316 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
2317 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM);
2319 BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
2320 poke_srmmu = poke_cypress;
2323 __initfunc(static void init_cypress_604(void))
2325 srmmu_name = "ROSS Cypress-604(UP)";
2326 srmmu_modtype = Cypress;
2327 init_cypress_common();
2330 __initfunc(static void init_cypress_605(unsigned long mrev))
2332 srmmu_name = "ROSS Cypress-605(MP)";
2333 if(mrev == 0xe) {
2334 srmmu_modtype = Cypress_vE;
2335 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2336 } else {
2337 if(mrev == 0xd) {
2338 srmmu_modtype = Cypress_vD;
2339 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2340 } else {
2341 srmmu_modtype = Cypress;
2344 init_cypress_common();
2347 __initfunc(static void poke_swift(void))
2349 unsigned long mreg = srmmu_get_mmureg();
2351 /* Clear any crap from the cache or else... */
2352 swift_idflash_clear();
2353 mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
2355 /* The Swift branch folding logic is completely broken. At
2356 * trap time, if things are just right, it can mistakenly
2357 * think that a trap is coming from kernel mode when in fact
2358 * it is coming from user mode (it mis-executes the branch in
2359 * the trap code). So you see things like crashme completely
2360 * hosing your machine which is completely unacceptable. Turn
2361 * this shit off... nice job Fujitsu.
2362 */
2363 mreg &= ~(SWIFT_BF);
2364 srmmu_set_mmureg(mreg);
2367 #define SWIFT_MASKID_ADDR 0x10003018
2368 __initfunc(static void init_swift(void))
2370 unsigned long swift_rev;
2372 __asm__ __volatile__("lda [%1] %2, %0\n\t"
2373 "srl %0, 0x18, %0\n\t" :
2374 "=r" (swift_rev) :
2375 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2376 srmmu_name = "Fujitsu Swift";
2377 switch(swift_rev) {
2378 case 0x11:
2379 case 0x20:
2380 case 0x23:
2381 case 0x30:
2382 srmmu_modtype = Swift_lots_o_bugs;
2383 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2384 /* Gee, George, I wonder why Sun is so hush-hush about
2385 * this hardware bug... really braindamaged stuff going
2386 * on here. However I think we can find a way to avoid
2387 * all of the workaround overhead under Linux. Basically,
2388 * any page fault can cause kernel pages to become user
2389 * accessible (the mmu gets confused and clears some of
2390 * the ACC bits in kernel ptes). Aha, sounds pretty
2391 * horrible eh? But wait, after extensive testing it appears
2392 * that if you use pgd_t level large kernel pte's (like the
2393 * 4MB pages on the Pentium) the bug does not get tripped
2394 * at all. This avoids almost all of the major overhead.
2395 * Welcome to a world where your vendor tells you to
2396 * "apply this kernel patch" instead of "sorry for the
2397 * broken hardware, send it back and we'll give you
2398 * properly functioning parts".
2399 */
2400 break;
2401 case 0x25:
2402 case 0x31:
2403 srmmu_modtype = Swift_bad_c;
2404 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2405 /* You see Sun allude to this hardware bug but never
2406 * admit things directly, they'll say things like,
2407 * "the Swift chip cache problems" or similar.
2408 */
2409 break;
2410 default:
2411 srmmu_modtype = Swift_ok;
2412 break;
2415 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
2416 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
2417 BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
2418 BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
2420 BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2422 BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
2423 BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
2424 BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
2425 BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
2427 BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
2428 BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
2429 BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
2431 BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
2433 /* Are you now convinced that the Swift is one of the
2434 * biggest VLSI abortions of all time? Bravo Fujitsu!
2435 * Fujitsu, the !#?!%$'d up processor people. I bet if
2436 * you examined the microcode of the Swift you'd find
2437 * XXX's all over the place.
2438 */
2439 poke_srmmu = poke_swift;
2442 static void turbosparc_flush_cache_all(void)
2444 flush_user_windows();
2445 turbosparc_idflash_clear();
2448 static void turbosparc_flush_cache_mm(struct mm_struct *mm)
2450 FLUSH_BEGIN(mm)
2451 flush_user_windows();
2452 turbosparc_idflash_clear();
2453 FLUSH_END
2456 static void turbosparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
2458 FLUSH_BEGIN(mm)
2459 flush_user_windows();
2460 turbosparc_idflash_clear();
2461 FLUSH_END
2464 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
2466 FLUSH_BEGIN(vma->vm_mm)
2467 flush_user_windows();
2468 if (vma->vm_flags & VM_EXEC)
2469 turbosparc_flush_icache();
2470 turbosparc_flush_dcache();
2471 FLUSH_END
2474 /* TurboSparc is copy-back (if we turn that on), but this does not work. */
2475 static void turbosparc_flush_page_to_ram(unsigned long page)
2477 #ifdef TURBOSPARC_WRITEBACK
2478 volatile unsigned long clear;
2480 if (srmmu_hwprobe(page))
2481 turbosparc_flush_page_cache(page);
2482 clear = srmmu_get_fstatus();
2483 #endif
2486 static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
2490 static void turbosparc_flush_page_for_dma(unsigned long page)
2492 turbosparc_flush_dcache();
2495 static void turbosparc_flush_chunk(unsigned long chunk)
2499 static void turbosparc_flush_tlb_all(void)
2501 srmmu_flush_whole_tlb();
2502 module_stats.invall++;
2505 static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
2507 FLUSH_BEGIN(mm)
2508 srmmu_flush_whole_tlb();
2509 module_stats.invmm++;
2510 FLUSH_END
2513 static void turbosparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
2515 FLUSH_BEGIN(mm)
2516 srmmu_flush_whole_tlb();
2517 module_stats.invrnge++;
2518 FLUSH_END
2521 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
2523 FLUSH_BEGIN(vma->vm_mm)
2524 srmmu_flush_whole_tlb();
2525 module_stats.invpg++;
2526 FLUSH_END
2530 __initfunc(static void poke_turbosparc(void))
2532 unsigned long mreg = srmmu_get_mmureg();
2533 unsigned long ccreg;
2535 /* Clear any crap from the cache or else... */
2536 turbosparc_flush_cache_all();
2537 mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
2538 mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
2539 srmmu_set_mmureg(mreg);
2541 ccreg = turbosparc_get_ccreg();
2543 #ifdef TURBOSPARC_WRITEBACK
2544 ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
2545 ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
2546 /* Write-back D-cache, emulate VLSI
2547 * abortion number three, not number one */
2548 #else
2549 /* For now let's play safe, optimize later */
2550 ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
2551 /* Do DVMA snooping in Dcache, Write-thru D-cache */
2552 ccreg &= ~(TURBOSPARC_uS2);
2553 /* Emulate VLSI abortion number three, not number one */
2554 #endif
2556 switch (ccreg & 7) {
2557 case 0: /* No SE cache */
2558 case 7: /* Test mode */
2559 break;
2560 default:
2561 ccreg |= (TURBOSPARC_SCENABLE);
2563 turbosparc_set_ccreg (ccreg);
2565 mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
2566 mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
2567 srmmu_set_mmureg(mreg);
2570 __initfunc(static void init_turbosparc(void))
2572 srmmu_name = "Fujitsu TurboSparc";
2573 srmmu_modtype = TurboSparc;
2575 BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
2576 BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
2577 BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
2578 BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
2580 BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
2581 BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
2582 BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
2583 BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
2585 BTFIXUPSET_CALL(flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
2586 BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM);
2588 BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
2589 BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP);
2591 poke_srmmu = poke_turbosparc;
2594 __initfunc(static void poke_tsunami(void))
2596 unsigned long mreg = srmmu_get_mmureg();
2598 tsunami_flush_icache();
2599 tsunami_flush_dcache();
2600 mreg &= ~TSUNAMI_ITD;
2601 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2602 srmmu_set_mmureg(mreg);
2605 __initfunc(static void init_tsunami(void))
2607 /* Tsunami's pretty sane, Sun and TI actually got it
2608 * somewhat right this time. Fujitsu should have
2609 * taken some lessons from them.
2610 */
2612 srmmu_name = "TI Tsunami";
2613 srmmu_modtype = Tsunami;
2615 BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
2616 BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
2617 BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
2618 BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
2620 BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2622 BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
2623 BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
2624 BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
2625 BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
2627 BTFIXUPSET_CALL(flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
2628 BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
2629 BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
2631 poke_srmmu = poke_tsunami;
2634 __initfunc(static void poke_viking(void))
2636 unsigned long mreg = srmmu_get_mmureg();
2637 static int smp_catch = 0;
2639 if(viking_mxcc_present) {
2640 unsigned long mxcc_control = mxcc_get_creg();
2642 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2643 mxcc_control &= ~(MXCC_CTL_RRC);
2644 mxcc_set_creg(mxcc_control);
2646 /* We don't need memory parity checks.
2647 * XXX This is a mess, have to dig out later. ecd.
2648 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
2649 */
2651 /* We do cache ptables on MXCC. */
2652 mreg |= VIKING_TCENABLE;
2653 } else {
2654 unsigned long bpreg;
2656 mreg &= ~(VIKING_TCENABLE);
2657 if(smp_catch++) {
2658 /* Must disable mixed-cmd mode here for
2659 * other cpus.
2660 */
2661 bpreg = viking_get_bpreg();
2662 bpreg &= ~(VIKING_ACTION_MIX);
2663 viking_set_bpreg(bpreg);
2665 /* Just in case PROM does something funny. */
2666 msi_set_sync();
2670 mreg |= VIKING_SPENABLE;
2671 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2672 mreg |= VIKING_SBENABLE;
2673 mreg &= ~(VIKING_ACENABLE);
2674 srmmu_set_mmureg(mreg);
2676 #ifdef __SMP__
2677 /* Avoid unnecessary cross calls. */
2678 BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
2679 BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
2680 BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
2681 BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
2682 BTFIXUPCOPY_CALL(flush_page_to_ram, local_flush_page_to_ram);
2683 BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
2684 BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
2685 btfixup();
2686 #endif
2689 __initfunc(static void init_viking(void))
2691 unsigned long mreg = srmmu_get_mmureg();
2693 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
2694 if(mreg & VIKING_MMODE) {
2695 srmmu_name = "TI Viking";
2696 viking_mxcc_present = 0;
2697 msi_set_sync();
2699 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
2700 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
2701 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
2702 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
2703 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM);
2705 BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
2707 /* We need this to make sure old Viking takes no hits
2708 * on its cache for DMA snoops, to work around the
2709 * "load from non-cacheable memory" interrupt bug.
2710 * This is only necessary because of the new way in
2711 * which we use the IOMMU.
2712 */
2713 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
2714 /* Also, this is so far the only chip which actually uses
2715 the page argument to flush_page_for_dma */
2716 flush_page_for_dma_global = 0;
2717 } else {
2718 srmmu_name = "TI Viking/MXCC";
2719 viking_mxcc_present = 1;
2721 BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
2723 /* MXCC vikings lack the DMA snooping bug. */
2724 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
2727 BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
2728 BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
2729 BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
2730 BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
2732 #ifdef __SMP__
2733 if (sparc_cpu_model == sun4d) {
2734 BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
2735 BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
2736 BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
2737 BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
2738 } else
2739 #endif
2741 BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
2742 BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
2743 BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
2744 BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
2747 BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
2748 BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
2750 poke_srmmu = poke_viking;
2753 /* Probe for the srmmu chip version. */
2754 __initfunc(static void get_srmmu_type(void))
2756 unsigned long mreg, psr;
2757 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2759 srmmu_modtype = SRMMU_INVAL_MOD;
2760 hwbug_bitmask = 0;
2762 mreg = srmmu_get_mmureg(); psr = get_psr();
2763 mod_typ = (mreg & 0xf0000000) >> 28;
2764 mod_rev = (mreg & 0x0f000000) >> 24;
2765 psr_typ = (psr >> 28) & 0xf;
2766 psr_vers = (psr >> 24) & 0xf;
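/* The PSR impl/vers fields identify the integer unit, and the module
* type/revision fields of the MMU control register identify the
* cache/MMU module; the tests below decode Ross (HyperSparc/Cypress),
* Fujitsu (TurboSparc/Swift) and TI (Viking/Tsunami) parts from these
* four values.
*/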
2768 /* First, check for HyperSparc or Cypress. */
2769 if(mod_typ == 1) {
2770 switch(mod_rev) {
2771 case 7:
2772 /* UP or MP Hypersparc */
2773 init_hypersparc();
2774 break;
2775 case 0:
2776 case 2:
2777 /* Uniprocessor Cypress */
2778 init_cypress_604();
2779 break;
2780 case 10:
2781 case 11:
2782 case 12:
2783 /* _REALLY OLD_ Cypress MP chips... */
2784 case 13:
2785 case 14:
2786 case 15:
2787 /* MP Cypress mmu/cache-controller */
2788 init_cypress_605(mod_rev);
2789 break;
2790 default:
2791 /* Some other Cypress revision, assume a 605. */
2792 init_cypress_605(mod_rev);
2793 break;
2795 return;
2798 /* Now Fujitsu TurboSparc. It might happen that it is
2799 in Swift emulation mode, so we will check later... */
2800 if (psr_typ == 0 && psr_vers == 5) {
2801 init_turbosparc();
2802 return;
2805 /* Next check for Fujitsu Swift. */
2806 if(psr_typ == 0 && psr_vers == 4) {
2807 int cpunode;
2808 char node_str[128];
2810 /* Check whether this is really a TurboSparc emulating a Swift... */
2811 cpunode = prom_getchild(prom_root_node);
2812 while((cpunode = prom_getsibling(cpunode)) != 0) {
2813 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
2814 if(!strcmp(node_str, "cpu")) {
2815 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
2816 prom_getintdefault(cpunode, "psr-version", 1) == 5) {
2817 init_turbosparc();
2818 return;
2820 break;
2824 init_swift();
2825 return;
2828 /* Now the Viking family of srmmu. */
2829 if(psr_typ == 4 &&
2830 ((psr_vers == 0) ||
2831 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2832 init_viking();
2833 return;
2836 /* Finally the Tsunami. */
2837 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2838 init_tsunami();
2839 return;
2842 /* Oh well */
2843 srmmu_is_bad();
2846 static int srmmu_check_pgt_cache(int low, int high)
2848 struct page *page, *page2;
2849 int freed = 0;
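/* Trim the pte and pgd quicklists back towards 'low' once they grow
* past 'high'.  page->pprev_hash doubles as a free-slot mask here:
* 0xffff means all 16 pte tables in the page are free, 0xf means all
* 4 pgds are, so the whole page can be unlinked and returned with
* __free_page().
*/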
2851 if (pgtable_cache_size > high) {
2852 spin_lock(&pte_spinlock);
2853 for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
2854 if ((unsigned int)page->pprev_hash == 0xffff) {
2855 if (page2)
2856 page2->next_hash = page->next_hash;
2857 else
2858 (struct page *)pte_quicklist = page->next_hash;
2859 page->next_hash = NULL;
2860 page->pprev_hash = NULL;
2861 pgtable_cache_size -= 16;
2862 __free_page(page);
2863 freed++;
2864 if (page2)
2865 page = page2->next_hash;
2866 else
2867 page = (struct page *)pte_quicklist;
2868 if (pgtable_cache_size <= low)
2869 break;
2870 continue;
2872 page2 = page;
2873 page = page->next_hash;
2875 spin_unlock(&pte_spinlock);
2877 if (pgd_cache_size > high / 4) {
2878 spin_lock(&pgd_spinlock);
2879 for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
2880 if ((unsigned int)page->pprev_hash == 0xf) {
2881 if (page2)
2882 page2->next_hash = page->next_hash;
2883 else
2884 (struct page *)pgd_quicklist = page->next_hash;
2885 page->next_hash = NULL;
2886 page->pprev_hash = NULL;
2887 pgd_cache_size -= 4;
2888 __free_page(page);
2889 freed++;
2890 if (page2)
2891 page = page2->next_hash;
2892 else
2893 page = (struct page *)pgd_quicklist;
2894 if (pgd_cache_size <= low / 4)
2895 break;
2896 continue;
2898 page2 = page;
2899 page = page->next_hash;
2901 spin_unlock(&pgd_spinlock);
2903 return freed;
2906 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2907 tsetup_mmu_patchme, rtrap_mmu_patchme;
2909 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2910 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2912 extern unsigned long srmmu_fault;
2914 #define PATCH_BRANCH(insn, dest) do { \
2915 iaddr = &(insn); \
2916 daddr = &(dest); \
2917 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2918 } while(0);
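/* PATCH_BRANCH() overwrites the instruction at 'insn' with a branch to
* 'dest'.  Below it redirects the window handler stack checks to their
* SRMMU-specific versions and the trap table fault slots to
* srmmu_fault.
*/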
2920 __initfunc(static void patch_window_trap_handlers(void))
2922 unsigned long *iaddr, *daddr;
2924 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2925 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2926 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2927 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2928 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2929 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2930 PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2933 #ifdef __SMP__
2934 /* Local cross-calls. */
2935 static void smp_flush_page_for_dma(unsigned long page)
2937 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
2940 #endif
2942 /* Load up routines and constants for sun4m and sun4d mmu */
2943 __initfunc(void ld_mmu_srmmu(void))
2945 extern void ld_mmu_iommu(void);
2946 extern void ld_mmu_iounit(void);
2947 extern void ___xchg32_sun4md(void);
2949 /* First the constants */
2950 BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
2951 BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
2952 BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
2953 BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
2954 BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
2955 BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
2957 BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
2958 BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
2959 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
2961 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
2962 BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
2963 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
2964 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
2965 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
2966 pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2968 /* Functions */
2969 #ifndef __SMP__
2970 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
2971 #endif
2972 BTFIXUPSET_CALL(get_pte_fast, srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
2973 BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
2974 BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
2975 BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
2976 BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
2978 BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
2980 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1);
2981 BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM);
2982 BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM);
2984 BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
2985 BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
2986 BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
2988 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);
2990 BTFIXUPSET_SETHI(none_mask, 0xF0000000);
2992 BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
2993 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
2995 BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
2996 BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
2997 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
2999 BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
3000 BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
3001 BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
3002 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
3004 BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
3005 BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
3006 BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
3007 BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
3009 BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
3010 BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM);
3011 BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
3012 BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
3013 BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
3014 BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM);
3015 BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM);
3016 BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
3017 BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
3018 BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM);
3019 BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM);
3020 BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
3021 BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM);
3022 BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM);
3024 BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
3025 BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
3026 BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
3027 BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
3028 BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
3029 BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
3030 BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
3031 BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
3032 BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
3033 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
3034 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
3036 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
3037 BTFIXUPSET_CALL(mmu_v2p, srmmu_v2p, BTFIXUPCALL_NORM);
3038 BTFIXUPSET_CALL(mmu_p2v, srmmu_p2v, BTFIXUPCALL_NORM);
3040 /* Task struct and kernel stack allocating/freeing. */
3041 BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM);
3042 BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
3044 BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM);
3046 /* SRMMU specific. */
3047 BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
3048 BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
3050 get_srmmu_type();
3051 patch_window_trap_handlers();
3053 #ifdef __SMP__
3054 /* El switcheroo... */
3056 BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
3057 BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
3058 BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
3059 BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
3060 BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
3061 BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
3062 BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
3063 BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
3064 BTFIXUPCOPY_CALL(local_flush_page_to_ram, flush_page_to_ram);
3065 BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
3066 BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
3068 BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
3069 BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
3070 BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
3071 BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
3072 if (sparc_cpu_model != sun4d) {
3073 BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
3074 BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
3075 BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
3076 BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
3078 BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
3079 BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
3080 BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
3081 #endif
3082 if (sparc_cpu_model == sun4d)
3083 ld_mmu_iounit();
3084 else
3085 ld_mmu_iommu();
3086 #ifdef __SMP__
3087 if (sparc_cpu_model == sun4d)
3088 sun4d_init_smp();
3089 else
3090 sun4m_init_smp();
3091 #endif