1 /* $Id: srmmu.c,v 1.185 1999/03/24 11:42:35 davem Exp $
2 * srmmu.c: SRMMU specific routines for memory management.
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
10 #include <linux/config.h>
11 #include <linux/kernel.h>
13 #include <linux/malloc.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/init.h>
19 #include <asm/pgtable.h>
21 #include <asm/kdebug.h>
22 #include <asm/vaddrs.h>
23 #include <asm/traps.h>
26 #include <asm/cache.h>
27 #include <asm/oplib.h>
31 #include <asm/a.out.h>
32 #include <asm/mmu_context.h>
33 #include <asm/io-unit.h>
34 #include <asm/spinlock.h>
36 /* Now the cpu specific definitions. */
37 #include <asm/viking.h>
40 #include <asm/tsunami.h>
41 #include <asm/swift.h>
42 #include <asm/turbosparc.h>
44 #include <asm/btfixup.h>
46 /* #define DEBUG_MAP_KERNEL */
47 /* #define PAGESKIP_DEBUG */
49 enum mbus_module srmmu_modtype
;
50 unsigned int hwbug_bitmask
;
55 extern unsigned long sparc_iobase_vaddr
;
58 #define FLUSH_BEGIN(mm)
61 #define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
65 static int phys_mem_contig
;
66 BTFIXUPDEF_SETHI(page_contig_offset
)
68 BTFIXUPDEF_CALL(void, ctxd_set
, ctxd_t
*, pgd_t
*)
69 BTFIXUPDEF_CALL(void, pmd_set
, pmd_t
*, pte_t
*)
71 #define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
72 #define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
74 BTFIXUPDEF_CALL(void, flush_page_for_dma
, unsigned long)
75 BTFIXUPDEF_CALL(void, flush_chunk
, unsigned long)
77 #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
78 int flush_page_for_dma_global
= 1;
79 #define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
81 BTFIXUPDEF_CALL(void, local_flush_page_for_dma
, unsigned long)
83 #define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
86 static struct srmmu_stats
{
95 ctxd_t
*srmmu_ctx_table_phys
;
96 ctxd_t
*srmmu_context_table
;
98 /* Don't change this without changing access to this
99 * in arch/sparc/mm/viking.S
101 static struct srmmu_trans
{
105 } srmmu_map
[SPARC_PHYS_BANKS
];
107 #define SRMMU_HASHSZ 256
109 /* Not static, viking.S uses it. */
110 unsigned long srmmu_v2p_hash
[SRMMU_HASHSZ
];
111 static unsigned long srmmu_p2v_hash
[SRMMU_HASHSZ
];
113 #define srmmu_ahashfn(addr) ((addr) >> 24)
115 int viking_mxcc_present
= 0;
117 /* Physical memory can be _very_ non-contiguous on the sun4m, especially
118 * the SS10/20 class machines and with the latest openprom revisions.
119 * So we have to do a quick lookup.
120 * We use the same for SS1000/SC2000 as a fall back, when phys memory is
123 static inline unsigned long srmmu_v2p(unsigned long vaddr
)
125 unsigned long off
= srmmu_v2p_hash
[srmmu_ahashfn(vaddr
)];
127 return (vaddr
+ off
);
// Physical -> virtual: look up the per-16MB offset in the p2v hash
// and subtract it from the physical address.
// NOTE(review): the extraction appears to have dropped the fallback
// return taken when off == 0xffffffffUL (no RAM bank maps this
// physical address) -- confirm against the original srmmu.c.
130 static inline unsigned long srmmu_p2v(unsigned long paddr
)
132 unsigned long off
= srmmu_p2v_hash
[srmmu_ahashfn(paddr
)];
// A recorded offset of 0xffffffffUL marks "no mapping".
134 if (off
!= 0xffffffffUL
)
135 return (paddr
- off
);
140 /* Physical memory on most SS1000/SC2000 can be contiguous, so we handle that case
141 * as a special case to make things faster.
143 /* FIXME: gcc is stupid here and generates very very bad code in this
144 * heavily used routine. So we help it a bit. */
// Virtual -> physical for machines whose physical memory is
// contiguous (SS1000/SC2000 fast path).
// NOTE(review): the #else / #endif lines of the KERNBASE conditional
// appear to have been dropped by extraction; the sethi-based code
// after the first return pair is the KERNBASE == 0xf0000000 build.
// Confirm against the original srmmu.c.
145 static inline unsigned long srmmu_c_v2p(unsigned long vaddr
)
147 #if KERNBASE != 0xf0000000
148 if (vaddr
>= KERNBASE
) return vaddr
- KERNBASE
;
149 return vaddr
- BTFIXUP_SETHI(page_contig_offset
);
// Build with KERNBASE == 0xf0000000: materialize the constant with a
// single sethi to help gcc (see FIXME above this function).
151 register unsigned long kernbase
;
153 __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase
));
154 return vaddr
- ((vaddr
>= kernbase
) ? kernbase
: BTFIXUP_SETHI(page_contig_offset
));
// Physical -> virtual inverse of srmmu_c_v2p for contiguous memory.
// NOTE(review): as with srmmu_c_v2p, the #else / #endif lines of the
// KERNBASE conditional appear dropped by extraction -- confirm.
158 static inline unsigned long srmmu_c_p2v(unsigned long paddr
)
160 #if KERNBASE != 0xf0000000
161 if (paddr
< (0xfd000000 - KERNBASE
)) return paddr
+ KERNBASE
;
162 return (paddr
+ BTFIXUP_SETHI(page_contig_offset
));
// KERNBASE == 0xf0000000 build: constants via sethi for better code.
164 register unsigned long kernbase
;
165 register unsigned long limit
;
167 __asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit
));
168 __asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase
));
170 return paddr
+ ((paddr
< limit
) ? kernbase
: BTFIXUP_SETHI(page_contig_offset
));
174 /* On boxes where there is no lots_of_ram, KERNBASE is mapped to PA<0> and highest
175 PA is below 0x0d000000, we can optimize even more :) */
176 static inline unsigned long srmmu_s_v2p(unsigned long vaddr
)
178 return vaddr
- PAGE_OFFSET
;
181 static inline unsigned long srmmu_s_p2v(unsigned long paddr
)
183 return paddr
+ PAGE_OFFSET
;
186 /* In general all page table modifications should use the V8 atomic
187 * swap instruction. This insures the mmu and the cpu are in sync
188 * with respect to ref/mod bits in the page tables.
// Atomically exchange *addr with value using the V8 `swap`
// instruction, keeping CPU and MMU in sync w.r.t. ref/mod bits in the
// page tables (see comment above).
// NOTE(review): the trailing `return value;` line appears to have
// been dropped by extraction -- confirm against the original srmmu.c.
190 static inline unsigned long srmmu_swap(unsigned long *addr
, unsigned long value
)
192 __asm__
__volatile__("swap [%2], %0" : "=&r" (value
) : "0" (value
), "r" (addr
));
196 /* Functions really use this, not srmmu_swap directly. */
197 #define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
199 #ifdef PAGESKIP_DEBUG
200 #define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
202 #define PGSKIP_DEBUG(from,to) do { } while (0)
205 __initfunc(void srmmu_frob_mem_map(unsigned long start_mem
))
207 unsigned long bank_start
, bank_end
= 0;
211 /* First, mark all pages as invalid. */
212 for(addr
= PAGE_OFFSET
; MAP_NR(addr
) < max_mapnr
; addr
+= PAGE_SIZE
)
213 mem_map
[MAP_NR(addr
)].flags
|= (1<<PG_reserved
);
215 /* Next, pg[0-3] is sun4c cruft, so we can free it... */
216 mem_map
[MAP_NR(pg0
)].flags
&= ~(1<<PG_reserved
);
217 mem_map
[MAP_NR(pg1
)].flags
&= ~(1<<PG_reserved
);
218 mem_map
[MAP_NR(pg2
)].flags
&= ~(1<<PG_reserved
);
219 mem_map
[MAP_NR(pg3
)].flags
&= ~(1<<PG_reserved
);
221 start_mem
= PAGE_ALIGN(start_mem
);
222 for(i
= 0; srmmu_map
[i
].size
; i
++) {
223 bank_start
= srmmu_map
[i
].vbase
;
225 /* Making a one or two pages PG_skip holes
226 * is not necessary. We add one more because
227 * we must set the PG_skip flag on the first
228 * two mem_map[] entries for the hole. Go and
229 * see the mm/filemap.c:shrink_mmap() loop for
232 if (i
&& bank_start
- bank_end
> 3 * PAGE_SIZE
) {
233 mem_map
[MAP_NR(bank_end
)].flags
|= (1<<PG_skip
);
234 mem_map
[MAP_NR(bank_end
)].next_hash
= mem_map
+ MAP_NR(bank_start
);
235 mem_map
[MAP_NR(bank_end
)+1UL].flags
|= (1<<PG_skip
);
236 mem_map
[MAP_NR(bank_end
)+1UL].next_hash
= mem_map
+ MAP_NR(bank_start
);
237 PGSKIP_DEBUG(MAP_NR(bank_end
), MAP_NR(bank_start
));
238 if (bank_end
> KERNBASE
&& bank_start
< KERNBASE
) {
239 mem_map
[0].flags
|= (1<<PG_skip
);
240 mem_map
[0].next_hash
= mem_map
+ MAP_NR(bank_start
);
241 mem_map
[1].flags
|= (1<<PG_skip
);
242 mem_map
[1].next_hash
= mem_map
+ MAP_NR(bank_start
);
243 PGSKIP_DEBUG(0, MAP_NR(bank_start
));
247 bank_end
= bank_start
+ srmmu_map
[i
].size
;
248 while(bank_start
< bank_end
) {
249 set_bit(MAP_NR(bank_start
) >> 8, sparc_valid_addr_bitmap
);
250 if((bank_start
>= KERNBASE
) &&
251 (bank_start
< start_mem
)) {
252 bank_start
+= PAGE_SIZE
;
255 mem_map
[MAP_NR(bank_start
)].flags
&= ~(1<<PG_reserved
);
256 bank_start
+= PAGE_SIZE
;
259 if (bank_end
== 0xfd000000)
260 bank_end
= PAGE_OFFSET
;
263 if (bank_end
< KERNBASE
) {
264 mem_map
[MAP_NR(bank_end
)].flags
|= (1<<PG_skip
);
265 mem_map
[MAP_NR(bank_end
)].next_hash
= mem_map
+ MAP_NR(KERNBASE
);
266 mem_map
[MAP_NR(bank_end
)+1UL].flags
|= (1<<PG_skip
);
267 mem_map
[MAP_NR(bank_end
)+1UL].next_hash
= mem_map
+ MAP_NR(KERNBASE
);
268 PGSKIP_DEBUG(MAP_NR(bank_end
), MAP_NR(KERNBASE
));
269 } else if (MAP_NR(bank_end
) < max_mapnr
) {
270 mem_map
[MAP_NR(bank_end
)].flags
|= (1<<PG_skip
);
271 mem_map
[MAP_NR(bank_end
)+1UL].flags
|= (1<<PG_skip
);
272 if (mem_map
[0].flags
& (1 << PG_skip
)) {
273 mem_map
[MAP_NR(bank_end
)].next_hash
= mem_map
[0].next_hash
;
274 mem_map
[MAP_NR(bank_end
)+1UL].next_hash
= mem_map
[0].next_hash
;
275 PGSKIP_DEBUG(MAP_NR(bank_end
), mem_map
[0].next_hash
- mem_map
);
277 mem_map
[MAP_NR(bank_end
)].next_hash
= mem_map
;
278 mem_map
[MAP_NR(bank_end
)+1UL].next_hash
= mem_map
;
279 PGSKIP_DEBUG(MAP_NR(bank_end
), 0);
284 /* The very generic SRMMU page table operations. */
/* An address whose top nibble is non-zero refers to device (I/O)
 * space rather than RAM: such entries have no virtual alias.
 */
static inline int srmmu_device_memory(unsigned long x)
{
	return (x & 0xF0000000) != 0;
}
290 static unsigned long srmmu_pgd_page(pgd_t pgd
)
291 { return srmmu_device_memory(pgd_val(pgd
))?~0:srmmu_p2v((pgd_val(pgd
) & SRMMU_PTD_PMASK
) << 4); }
293 static unsigned long srmmu_pmd_page(pmd_t pmd
)
294 { return srmmu_device_memory(pmd_val(pmd
))?~0:srmmu_p2v((pmd_val(pmd
) & SRMMU_PTD_PMASK
) << 4); }
296 static unsigned long srmmu_pte_page(pte_t pte
)
297 { return srmmu_device_memory(pte_val(pte
))?~0:srmmu_p2v((pte_val(pte
) & SRMMU_PTE_PMASK
) << 4); }
299 static unsigned long srmmu_c_pgd_page(pgd_t pgd
)
300 { return srmmu_device_memory(pgd_val(pgd
))?~0:srmmu_c_p2v((pgd_val(pgd
) & SRMMU_PTD_PMASK
) << 4); }
302 static unsigned long srmmu_c_pmd_page(pmd_t pmd
)
303 { return srmmu_device_memory(pmd_val(pmd
))?~0:srmmu_c_p2v((pmd_val(pmd
) & SRMMU_PTD_PMASK
) << 4); }
305 static unsigned long srmmu_c_pte_page(pte_t pte
)
306 { return srmmu_device_memory(pte_val(pte
))?~0:srmmu_c_p2v((pte_val(pte
) & SRMMU_PTE_PMASK
) << 4); }
308 static unsigned long srmmu_s_pgd_page(pgd_t pgd
)
309 { return srmmu_device_memory(pgd_val(pgd
))?~0:srmmu_s_p2v((pgd_val(pgd
) & SRMMU_PTD_PMASK
) << 4); }
311 static unsigned long srmmu_s_pmd_page(pmd_t pmd
)
312 { return srmmu_device_memory(pmd_val(pmd
))?~0:srmmu_s_p2v((pmd_val(pmd
) & SRMMU_PTD_PMASK
) << 4); }
314 static unsigned long srmmu_s_pte_page(pte_t pte
)
315 { return srmmu_device_memory(pte_val(pte
))?~0:srmmu_s_p2v((pte_val(pte
) & SRMMU_PTE_PMASK
) << 4); }
317 static inline int srmmu_pte_none(pte_t pte
)
318 { return !(pte_val(pte
) & 0xFFFFFFF); }
319 static inline int srmmu_pte_present(pte_t pte
)
320 { return ((pte_val(pte
) & SRMMU_ET_MASK
) == SRMMU_ET_PTE
); }
322 static inline void srmmu_pte_clear(pte_t
*ptep
) { set_pte(ptep
, __pte(0)); }
324 static inline int srmmu_pmd_none(pmd_t pmd
)
325 { return !(pmd_val(pmd
) & 0xFFFFFFF); }
326 static inline int srmmu_pmd_bad(pmd_t pmd
)
327 { return (pmd_val(pmd
) & SRMMU_ET_MASK
) != SRMMU_ET_PTD
; }
329 static inline int srmmu_pmd_present(pmd_t pmd
)
330 { return ((pmd_val(pmd
) & SRMMU_ET_MASK
) == SRMMU_ET_PTD
); }
332 static inline void srmmu_pmd_clear(pmd_t
*pmdp
) { set_pte((pte_t
*)pmdp
, __pte(0)); }
334 static inline int srmmu_pgd_none(pgd_t pgd
)
335 { return !(pgd_val(pgd
) & 0xFFFFFFF); }
337 static inline int srmmu_pgd_bad(pgd_t pgd
)
338 { return (pgd_val(pgd
) & SRMMU_ET_MASK
) != SRMMU_ET_PTD
; }
340 static inline int srmmu_pgd_present(pgd_t pgd
)
341 { return ((pgd_val(pgd
) & SRMMU_ET_MASK
) == SRMMU_ET_PTD
); }
343 static inline void srmmu_pgd_clear(pgd_t
* pgdp
) { set_pte((pte_t
*)pgdp
, __pte(0)); }
345 static inline int srmmu_pte_write(pte_t pte
) { return pte_val(pte
) & SRMMU_WRITE
; }
346 static inline int srmmu_pte_dirty(pte_t pte
) { return pte_val(pte
) & SRMMU_DIRTY
; }
347 static inline int srmmu_pte_young(pte_t pte
) { return pte_val(pte
) & SRMMU_REF
; }
349 static inline pte_t
srmmu_pte_wrprotect(pte_t pte
) { return __pte(pte_val(pte
) & ~SRMMU_WRITE
);}
350 static inline pte_t
srmmu_pte_mkclean(pte_t pte
) { return __pte(pte_val(pte
) & ~SRMMU_DIRTY
);}
351 static inline pte_t
srmmu_pte_mkold(pte_t pte
) { return __pte(pte_val(pte
) & ~SRMMU_REF
);}
352 static inline pte_t
srmmu_pte_mkwrite(pte_t pte
) { return __pte(pte_val(pte
) | SRMMU_WRITE
);}
353 static inline pte_t
srmmu_pte_mkdirty(pte_t pte
) { return __pte(pte_val(pte
) | SRMMU_DIRTY
);}
354 static inline pte_t
srmmu_pte_mkyoung(pte_t pte
) { return __pte(pte_val(pte
) | SRMMU_REF
);}
357 * Conversion functions: convert a page and protection to a page entry,
358 * and a page entry and page directory to the page they refer to.
360 static pte_t
srmmu_mk_pte(unsigned long page
, pgprot_t pgprot
)
361 { return __pte(((srmmu_v2p(page
)) >> 4) | pgprot_val(pgprot
)); }
363 static pte_t
srmmu_c_mk_pte(unsigned long page
, pgprot_t pgprot
)
364 { return __pte(((srmmu_c_v2p(page
)) >> 4) | pgprot_val(pgprot
)); }
366 static pte_t
srmmu_s_mk_pte(unsigned long page
, pgprot_t pgprot
)
367 { return __pte(((srmmu_s_v2p(page
)) >> 4) | pgprot_val(pgprot
)); }
369 static pte_t
srmmu_mk_pte_phys(unsigned long page
, pgprot_t pgprot
)
370 { return __pte(((page
) >> 4) | pgprot_val(pgprot
)); }
372 static pte_t
srmmu_mk_pte_io(unsigned long page
, pgprot_t pgprot
, int space
)
374 return __pte(((page
) >> 4) | (space
<< 28) | pgprot_val(pgprot
));
377 static void srmmu_ctxd_set(ctxd_t
*ctxp
, pgd_t
*pgdp
)
379 set_pte((pte_t
*)ctxp
, (SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) pgdp
) >> 4)));
382 static void srmmu_pgd_set(pgd_t
* pgdp
, pmd_t
* pmdp
)
384 set_pte((pte_t
*)pgdp
, (SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) pmdp
) >> 4)));
387 static void srmmu_pmd_set(pmd_t
* pmdp
, pte_t
* ptep
)
389 set_pte((pte_t
*)pmdp
, (SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) ptep
) >> 4)));
392 static void srmmu_c_ctxd_set(ctxd_t
*ctxp
, pgd_t
*pgdp
)
394 set_pte((pte_t
*)ctxp
, (SRMMU_ET_PTD
| (srmmu_c_v2p((unsigned long) pgdp
) >> 4)));
397 static void srmmu_c_pgd_set(pgd_t
* pgdp
, pmd_t
* pmdp
)
399 set_pte((pte_t
*)pgdp
, (SRMMU_ET_PTD
| (srmmu_c_v2p((unsigned long) pmdp
) >> 4)));
402 static void srmmu_c_pmd_set(pmd_t
* pmdp
, pte_t
* ptep
)
404 set_pte((pte_t
*)pmdp
, (SRMMU_ET_PTD
| (srmmu_c_v2p((unsigned long) ptep
) >> 4)));
407 static void srmmu_s_ctxd_set(ctxd_t
*ctxp
, pgd_t
*pgdp
)
409 set_pte((pte_t
*)ctxp
, (SRMMU_ET_PTD
| (srmmu_s_v2p((unsigned long) pgdp
) >> 4)));
412 static void srmmu_s_pgd_set(pgd_t
* pgdp
, pmd_t
* pmdp
)
414 set_pte((pte_t
*)pgdp
, (SRMMU_ET_PTD
| (srmmu_s_v2p((unsigned long) pmdp
) >> 4)));
417 static void srmmu_s_pmd_set(pmd_t
* pmdp
, pte_t
* ptep
)
419 set_pte((pte_t
*)pmdp
, (SRMMU_ET_PTD
| (srmmu_s_v2p((unsigned long) ptep
) >> 4)));
422 static inline pte_t
srmmu_pte_modify(pte_t pte
, pgprot_t newprot
)
424 return __pte((pte_val(pte
) & SRMMU_CHG_MASK
) | pgprot_val(newprot
));
427 /* to find an entry in a top-level page table... */
428 static inline pgd_t
*srmmu_pgd_offset(struct mm_struct
* mm
, unsigned long address
)
430 return mm
->pgd
+ (address
>> SRMMU_PGDIR_SHIFT
);
433 /* Find an entry in the second-level page table.. */
434 static inline pmd_t
*srmmu_pmd_offset(pgd_t
* dir
, unsigned long address
)
436 return (pmd_t
*) srmmu_pgd_page(*dir
) + ((address
>> SRMMU_PMD_SHIFT
) & (SRMMU_PTRS_PER_PMD
- 1));
439 /* Find an entry in the third-level page table.. */
440 static inline pte_t
*srmmu_pte_offset(pmd_t
* dir
, unsigned long address
)
442 return (pte_t
*) srmmu_pmd_page(*dir
) + ((address
>> PAGE_SHIFT
) & (SRMMU_PTRS_PER_PTE
- 1));
445 static inline pmd_t
*srmmu_c_pmd_offset(pgd_t
* dir
, unsigned long address
)
447 return (pmd_t
*) srmmu_c_pgd_page(*dir
) + ((address
>> SRMMU_PMD_SHIFT
) & (SRMMU_PTRS_PER_PMD
- 1));
450 static inline pte_t
*srmmu_c_pte_offset(pmd_t
* dir
, unsigned long address
)
452 return (pte_t
*) srmmu_c_pmd_page(*dir
) + ((address
>> PAGE_SHIFT
) & (SRMMU_PTRS_PER_PTE
- 1));
455 static inline pmd_t
*srmmu_s_pmd_offset(pgd_t
* dir
, unsigned long address
)
457 return (pmd_t
*) srmmu_s_pgd_page(*dir
) + ((address
>> SRMMU_PMD_SHIFT
) & (SRMMU_PTRS_PER_PMD
- 1));
460 static inline pte_t
*srmmu_s_pte_offset(pmd_t
* dir
, unsigned long address
)
462 return (pte_t
*) srmmu_s_pmd_page(*dir
) + ((address
>> PAGE_SHIFT
) & (SRMMU_PTRS_PER_PTE
- 1));
465 /* This must update the context table entry for this process. */
466 static void srmmu_update_rootmmu_dir(struct task_struct
*tsk
, pgd_t
*pgdp
)
468 if(tsk
->mm
->context
!= NO_CONTEXT
&&
469 tsk
->mm
->pgd
!= pgdp
) {
470 flush_cache_mm(tsk
->mm
);
471 ctxd_set(&srmmu_context_table
[tsk
->mm
->context
], pgdp
);
472 flush_tlb_mm(tsk
->mm
);
476 static inline pte_t
*srmmu_get_pte_fast(void)
480 spin_lock(&pte_spinlock
);
481 if ((ret
= (struct page
*)pte_quicklist
) != NULL
) {
482 unsigned int mask
= (unsigned int)ret
->pprev_hash
;
483 unsigned int tmp
, off
;
486 for (tmp
= 0x001, off
= 0; (mask
& tmp
) == 0; tmp
<<= 1, off
+= 256);
488 for (tmp
= 0x100, off
= 2048; (mask
& tmp
) == 0; tmp
<<= 1, off
+= 256);
489 (unsigned int)ret
->pprev_hash
= mask
& ~tmp
;
491 pte_quicklist
= (unsigned long *)ret
->next_hash
;
492 ret
= (struct page
*)(page_address(ret
) + off
);
493 pgtable_cache_size
--;
495 spin_unlock(&pte_spinlock
);
499 static inline pte_t
*srmmu_get_pte_slow(void)
504 ret
= (pte_t
*)get_free_page(GFP_KERNEL
);
506 page
= mem_map
+ MAP_NR(ret
);
507 flush_chunk((unsigned long)ret
);
508 (unsigned int)page
->pprev_hash
= 0xfffe;
509 spin_lock(&pte_spinlock
);
510 (unsigned long *)page
->next_hash
= pte_quicklist
;
511 pte_quicklist
= (unsigned long *)page
;
512 pgtable_cache_size
+= 15;
517 static inline pgd_t
*srmmu_get_pgd_fast(void)
521 spin_lock(&pgd_spinlock
);
522 if ((ret
= (struct page
*)pgd_quicklist
) != NULL
) {
523 unsigned int mask
= (unsigned int)ret
->pprev_hash
;
524 unsigned int tmp
, off
;
526 for (tmp
= 0x001, off
= 0; (mask
& tmp
) == 0; tmp
<<= 1, off
+= 1024);
527 (unsigned int)ret
->pprev_hash
= mask
& ~tmp
;
529 pgd_quicklist
= (unsigned long *)ret
->next_hash
;
530 ret
= (struct page
*)(page_address(ret
) + off
);
533 spin_unlock(&pgd_spinlock
);
537 static inline pgd_t
*srmmu_get_pgd_slow(void)
542 ret
= (pgd_t
*)__get_free_page(GFP_KERNEL
);
544 pgd_t
*init
= pgd_offset(&init_mm
, 0);
545 memset(ret
+ (0 * PTRS_PER_PGD
), 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
546 memcpy(ret
+ (0 * PTRS_PER_PGD
) + USER_PTRS_PER_PGD
, init
+ USER_PTRS_PER_PGD
,
547 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
548 memset(ret
+ (1 * PTRS_PER_PGD
), 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
549 memcpy(ret
+ (1 * PTRS_PER_PGD
) + USER_PTRS_PER_PGD
, init
+ USER_PTRS_PER_PGD
,
550 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
551 memset(ret
+ (2 * PTRS_PER_PGD
), 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
552 memcpy(ret
+ (2 * PTRS_PER_PGD
) + USER_PTRS_PER_PGD
, init
+ USER_PTRS_PER_PGD
,
553 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
554 memset(ret
+ (3 * PTRS_PER_PGD
), 0, USER_PTRS_PER_PGD
* sizeof(pgd_t
));
555 memcpy(ret
+ (3 * PTRS_PER_PGD
) + USER_PTRS_PER_PGD
, init
+ USER_PTRS_PER_PGD
,
556 (PTRS_PER_PGD
- USER_PTRS_PER_PGD
) * sizeof(pgd_t
));
557 page
= mem_map
+ MAP_NR(ret
);
558 flush_chunk((unsigned long)ret
);
559 (unsigned int)page
->pprev_hash
= 0xe;
560 spin_lock(&pgd_spinlock
);
561 (unsigned long *)page
->next_hash
= pgd_quicklist
;
562 pgd_quicklist
= (unsigned long *)page
;
564 spin_unlock(&pgd_spinlock
);
569 static void srmmu_free_pte_slow(pte_t
*pte
)
573 static void srmmu_free_pgd_slow(pgd_t
*pgd
)
577 static inline void srmmu_pte_free(pte_t
*pte
)
579 struct page
*page
= mem_map
+ MAP_NR(pte
);
581 spin_lock(&pte_spinlock
);
582 if (!page
->pprev_hash
) {
583 (unsigned long *)page
->next_hash
= pte_quicklist
;
584 pte_quicklist
= (unsigned long *)page
;
586 (unsigned int)page
->pprev_hash
|= (1 << ((((unsigned long)pte
) >> 8) & 15));
587 pgtable_cache_size
++;
588 spin_unlock(&pte_spinlock
);
591 static pte_t
*srmmu_pte_alloc(pmd_t
* pmd
, unsigned long address
)
593 address
= (address
>> PAGE_SHIFT
) & (SRMMU_PTRS_PER_PTE
- 1);
594 if(srmmu_pmd_none(*pmd
)) {
595 pte_t
*page
= srmmu_get_pte_fast();
599 return page
+ address
;
601 page
= srmmu_get_pte_slow();
602 if(srmmu_pmd_none(*pmd
)) {
604 spin_unlock(&pte_spinlock
);
606 return page
+ address
;
608 pmd_set(pmd
, BAD_PAGETABLE
);
612 (unsigned int)(((struct page
*)pte_quicklist
)->pprev_hash
) = 0xffff;
613 pgtable_cache_size
++;
614 spin_unlock(&pte_spinlock
);
617 if(srmmu_pmd_bad(*pmd
)) {
618 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd
));
619 pmd_set(pmd
, BAD_PAGETABLE
);
622 return ((pte_t
*) pmd_page(*pmd
)) + address
;
625 /* Real three-level page tables on SRMMU. */
626 static void srmmu_pmd_free(pmd_t
* pmd
)
628 return srmmu_pte_free((pte_t
*)pmd
);
631 static pmd_t
*srmmu_pmd_alloc(pgd_t
* pgd
, unsigned long address
)
633 address
= (address
>> SRMMU_PMD_SHIFT
) & (SRMMU_PTRS_PER_PMD
- 1);
634 if(srmmu_pgd_none(*pgd
)) {
635 pmd_t
*page
= (pmd_t
*)srmmu_get_pte_fast();
639 return page
+ address
;
641 page
= (pmd_t
*)srmmu_get_pte_slow();
642 if(srmmu_pgd_none(*pgd
)) {
644 spin_unlock(&pte_spinlock
);
646 return page
+ address
;
648 pgd_set(pgd
, (pmd_t
*) BAD_PAGETABLE
);
652 (unsigned int)(((struct page
*)pte_quicklist
)->pprev_hash
) = 0xffff;
653 pgtable_cache_size
++;
654 spin_unlock(&pte_spinlock
);
657 if(srmmu_pgd_bad(*pgd
)) {
658 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd
));
659 pgd_set(pgd
, (pmd_t
*) BAD_PAGETABLE
);
662 return (pmd_t
*) pgd_page(*pgd
) + address
;
665 static void srmmu_pgd_free(pgd_t
*pgd
)
667 struct page
*page
= mem_map
+ MAP_NR(pgd
);
669 spin_lock(&pgd_spinlock
);
670 if (!page
->pprev_hash
) {
671 (unsigned long *)page
->next_hash
= pgd_quicklist
;
672 pgd_quicklist
= (unsigned long *)page
;
674 (unsigned int)page
->pprev_hash
|= (1 << ((((unsigned long)pgd
) >> 10) & 3));
676 spin_unlock(&pgd_spinlock
);
// Allocate a pgd: try the per-CPU quicklist first, fall back to the
// slow page-allocating path.
// NOTE(review): extraction appears to have dropped the declaration of
// `ret` and the `if (ret) return ret;` test between these lines --
// confirm against the original srmmu.c.
679 static pgd_t
*srmmu_pgd_alloc(void)
683 ret
= srmmu_get_pgd_fast();
685 return srmmu_get_pgd_slow();
689 static void srmmu_set_pgdir(unsigned long address
, pgd_t entry
)
691 struct task_struct
* p
;
694 read_lock(&tasklist_lock
);
698 *pgd_offset(p
->mm
,address
) = entry
;
700 read_unlock(&tasklist_lock
);
701 spin_lock(&pgd_spinlock
);
702 address
>>= SRMMU_PGDIR_SHIFT
;
703 for (page
= (struct page
*)pgd_quicklist
; page
; page
= page
->next_hash
) {
704 pgd_t
*pgd
= (pgd_t
*)page_address(page
);
705 unsigned int mask
= (unsigned int)page
->pprev_hash
;
708 pgd
[address
+ 0 * SRMMU_PTRS_PER_PGD
] = entry
;
710 pgd
[address
+ 1 * SRMMU_PTRS_PER_PGD
] = entry
;
712 pgd
[address
+ 2 * SRMMU_PTRS_PER_PGD
] = entry
;
714 pgd
[address
+ 3 * SRMMU_PTRS_PER_PGD
] = entry
;
716 flush_chunk((unsigned long)pgd
);
718 spin_unlock(&pgd_spinlock
);
721 static void srmmu_set_pte_cacheable(pte_t
*ptep
, pte_t pteval
)
723 srmmu_set_entry(ptep
, pte_val(pteval
));
726 static void srmmu_set_pte_nocache_cypress(pte_t
*ptep
, pte_t pteval
)
728 register unsigned long a
, b
, c
, d
, e
, f
, g
;
729 unsigned long line
, page
;
731 srmmu_set_entry(ptep
, pte_val(pteval
));
732 page
= ((unsigned long)ptep
) & PAGE_MASK
;
733 line
= (page
+ PAGE_SIZE
) - 0x100;
734 a
= 0x20; b
= 0x40; c
= 0x60; d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
739 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
740 "sta %%g0, [%0 + %2] %1\n\t"
741 "sta %%g0, [%0 + %3] %1\n\t"
742 "sta %%g0, [%0 + %4] %1\n\t"
743 "sta %%g0, [%0 + %5] %1\n\t"
744 "sta %%g0, [%0 + %6] %1\n\t"
745 "sta %%g0, [%0 + %7] %1\n\t"
746 "sta %%g0, [%0 + %8] %1\n\t" : :
748 "i" (ASI_M_FLUSH_PAGE
),
749 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
750 "r" (e
), "r" (f
), "r" (g
));
751 } while(line
!= page
);
754 static void srmmu_set_pte_nocache_viking(pte_t
*ptep
, pte_t pteval
)
760 set
= ((unsigned long)ptep
>> 5) & 0x7f;
761 vaddr
= (KERNBASE
+ PAGE_SIZE
) | (set
<< 5);
762 srmmu_set_entry(ptep
, pte_val(pteval
));
763 for (i
= 0; i
< 8; i
++) {
764 __asm__
__volatile__ ("ld [%0], %%g0" : : "r" (vaddr
));
// Report an unexpected kernel-mode fault: print the faulting address
// (and the hardware-probed PTE), then die.
// NOTE(review): the #ifdef __SMP__ / #else / #endif lines selecting
// between the two printk forms appear dropped by extraction.
769 static void srmmu_quick_kernel_fault(unsigned long address
)
772 printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
773 smp_processor_id(), address
);
776 printk("Kernel faults at addr=0x%08lx\n", address
);
777 printk("PTE=%08lx\n", srmmu_hwprobe((address
& PAGE_MASK
)));
778 die_if_kernel("SRMMU bolixed...", current
->tss
.kregs
);
782 static inline void alloc_context(struct mm_struct
*mm
)
784 struct ctx_list
*ctxp
;
786 ctxp
= ctx_free
.next
;
787 if(ctxp
!= &ctx_free
) {
788 remove_from_ctx_list(ctxp
);
789 add_to_used_ctxlist(ctxp
);
790 mm
->context
= ctxp
->ctx_number
;
794 ctxp
= ctx_used
.next
;
795 if(ctxp
->ctx_mm
== current
->mm
)
797 if(ctxp
== &ctx_used
)
798 panic("out of mmu contexts");
799 flush_cache_mm(ctxp
->ctx_mm
);
800 flush_tlb_mm(ctxp
->ctx_mm
);
801 remove_from_ctx_list(ctxp
);
802 add_to_used_ctxlist(ctxp
);
803 ctxp
->ctx_mm
->context
= NO_CONTEXT
;
805 mm
->context
= ctxp
->ctx_number
;
808 static inline void free_context(int context
)
810 struct ctx_list
*ctx_old
;
// Index the static context pool by number, then move the entry from
// the used list back onto the free list.
812 ctx_old
= ctx_list_pool
+ context
;
813 remove_from_ctx_list(ctx_old
);
814 add_to_free_ctxlist(ctx_old
);
// Context switch: if the incoming task's mm has no MMU context yet,
// allocate one and point its context-table entry at the task's pgd,
// then load the hardware context register.
// NOTE(review): closing braces (and possibly other trailing lines)
// were dropped by extraction; statement order preserved verbatim.
818 static void srmmu_switch_to_context(struct task_struct
*tsk
)
820 if(tsk
->mm
->context
== NO_CONTEXT
) {
821 alloc_context(tsk
->mm
);
822 ctxd_set(&srmmu_context_table
[tsk
->mm
->context
], tsk
->mm
->pgd
);
824 srmmu_set_context(tsk
->mm
->context
);
// Wire a fresh mm into the context table and, if it belongs to the
// current task, make it the live hardware context immediately.
// NOTE(review): the extraction appears to have dropped lines before
// ctxd_set (likely the alloc_context(mm) call that assigns
// mm->context) -- confirm against the original srmmu.c.
827 static void srmmu_init_new_context(struct mm_struct
*mm
)
832 ctxd_set(&srmmu_context_table
[mm
->context
], mm
->pgd
);
835 if(mm
== current
->mm
)
836 srmmu_set_context(mm
->context
);
839 /* Low level IO area allocation on the SRMMU. */
// Map one page of I/O space at virt_addr in the kernel page tables;
// bus_type supplies the top 4 bits of the 36-bit physical address.
// NOTE(review): extraction dropped the local declarations of
// pgdp/pmdp/ptep/tmp and the `if (rdonly) ... else ...` that chooses
// SRMMU_PRIV_RDONLY vs SRMMU_PRIV.  It also dropped the terminator of
// the inline comment a few lines below, so the remainder of this body
// is swallowed into that comment in this damaged text.  Confirm all
// of this against the original srmmu.c.
840 void srmmu_mapioaddr(unsigned long physaddr
, unsigned long virt_addr
, int bus_type
, int rdonly
)
847 physaddr
&= PAGE_MASK
;
848 pgdp
= srmmu_pgd_offset(init_task
.mm
, virt_addr
);
849 pmdp
= pmd_offset(pgdp
, virt_addr
);
850 ptep
= pte_offset(pmdp
, virt_addr
);
851 tmp
= (physaddr
>> 4) | SRMMU_ET_PTE
;
853 /* I need to test whether this is consistent over all
854 * sun4m's. The bus_type represents the upper 4 bits of
855 * 36-bit physical address on the I/O space lines...
857 tmp
|= (bus_type
<< 28);
859 tmp
|= SRMMU_PRIV_RDONLY
;
862 flush_page_to_ram(virt_addr
);
863 set_pte(ptep
, __pte(tmp
));
// Tear down an I/O mapping made by srmmu_mapioaddr by pointing the
// pte at the shared empty page.  The mapping was uncacheable, so no
// cache flush is required.
// NOTE(review): extraction dropped the pgdp/pmdp/ptep declarations
// and the braces; also, in this damaged text the first half of this
// body sits inside an unterminated comment left over from
// srmmu_mapioaddr above.  Confirm against the original srmmu.c.
867 void srmmu_unmapioaddr(unsigned long virt_addr
)
873 pgdp
= srmmu_pgd_offset(init_task
.mm
, virt_addr
);
874 pmdp
= pmd_offset(pgdp
, virt_addr
);
875 ptep
= pte_offset(pmdp
, virt_addr
);
877 /* No need to flush uncacheable page. */
878 set_pte(ptep
, mk_pte((unsigned long) EMPTY_PGE
, PAGE_SHARED
));
882 /* This is used in many routines below. */
883 #define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
885 /* On the SRMMU we do not have the problems with limited tlb entries
886 * for mapping kernel pages, so we just take things from the free page
887 * pool. As a side effect we are putting a little too much pressure
888 * on the gfp() subsystem. This setup also makes the logic of the
889 * iommu mapping code a lot easier as we can transparently handle
890 * mappings on the kernel stack without any special code as we did
893 struct task_struct
*srmmu_alloc_task_struct(void)
895 return (struct task_struct
*) __get_free_pages(GFP_KERNEL
, 1);
/* Release the order-1 chunk handed out by srmmu_alloc_task_struct(). */
static void srmmu_free_task_struct(struct task_struct *tsk)
{
	free_pages((unsigned long) tsk, 1);
}
904 extern void tsunami_flush_cache_all(void);
905 extern void tsunami_flush_cache_mm(struct mm_struct
*mm
);
906 extern void tsunami_flush_cache_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
);
907 extern void tsunami_flush_cache_page(struct vm_area_struct
*vma
, unsigned long page
);
908 extern void tsunami_flush_page_to_ram(unsigned long page
);
909 extern void tsunami_flush_page_for_dma(unsigned long page
);
910 extern void tsunami_flush_sig_insns(struct mm_struct
*mm
, unsigned long insn_addr
);
911 extern void tsunami_flush_chunk(unsigned long chunk
);
912 extern void tsunami_flush_tlb_all(void);
913 extern void tsunami_flush_tlb_mm(struct mm_struct
*mm
);
914 extern void tsunami_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
);
915 extern void tsunami_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
);
917 /* Workaround, until we find what's going on with Swift. When low on memory, it sometimes
918 * loops in fault/handle_mm_fault incl. flush_tlb_page to find out it is already in page tables/
919 * fault again on the same instruction. I really don't understand it, have checked it and contexts
920 * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj
// Workaround for Swift's repeat-fault loop (see comment above): when
// the same address faults twice in a row, hardware-probe it.
// NOTE(review): `viking_hwprobe` in a Swift routine looks suspicious,
// and the `last = address;` update appears to have been dropped by
// extraction -- confirm both against the original srmmu.c.
922 static void swift_update_mmu_cache(struct vm_area_struct
* vma
, unsigned long address
, pte_t pte
)
924 static unsigned long last
;
926 if (last
== address
) viking_hwprobe(address
);
930 /* Swift flushes. It has the recommended SRMMU specification flushing
931 * facilities, so we can do things in a more fine grained fashion than we
932 * could on the tsunami. Let's watch out for HARDWARE BUGS...
/* Full cache flush: spill user register windows to memory first, then
 * clear both the instruction and data caches.
 */
static void swift_flush_cache_all(void)
{
	flush_user_windows();
	swift_idflash_clear();
}
// Per-mm cache flush.  Swift has no finer-grained per-context flush
// here, so this clears the whole I/D cache.
// NOTE(review): extraction appears to have dropped the
// FLUSH_BEGIN(mm)/FLUSH_END guard that skips mms without an MMU
// context -- confirm against the original srmmu.c.
941 static void swift_flush_cache_mm(struct mm_struct
*mm
)
944 flush_user_windows();
945 swift_idflash_clear();
// Range cache flush: same full I/D clear as the mm variant -- Swift
// cannot flush by range.
// NOTE(review): the FLUSH_BEGIN(mm)/FLUSH_END guard appears dropped
// by extraction -- confirm against the original srmmu.c.
949 static void swift_flush_cache_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
952 flush_user_windows();
953 swift_idflash_clear();
// Per-page cache flush, only when the mm owns a context
// (FLUSH_BEGIN).  The icache is flushed only for executable mappings;
// the write-through dcache is always flushed.
// NOTE(review): the closing FLUSH_END appears dropped by extraction.
957 static void swift_flush_cache_page(struct vm_area_struct
*vma
, unsigned long page
)
959 FLUSH_BEGIN(vma
->vm_mm
)
960 flush_user_windows();
961 if(vma
->vm_flags
& VM_EXEC
)
962 swift_flush_icache();
963 swift_flush_dcache();
967 /* Not copy-back on swift. */
static void swift_flush_page_to_ram(unsigned long page)
{
	/* Swift's data cache is write-through (see comment above), so
	 * RAM is always current and there is nothing to flush.
	 * NOTE(review): extraction dropped the braces; the original
	 * body appears to be empty -- confirm. */
}
972 /* But not IO coherent either. */
static void swift_flush_page_for_dma(unsigned long page)
{
	/* Swift is not I/O coherent: push the data cache out before a
	 * DMA engine reads the page. */
	swift_flush_dcache();
}
978 /* Again, Swift is non-snooping split I/D cache'd just like tsunami,
979 * so have to punt the icache for on-stack signal insns. Only the
980 * icache need be flushed since the dcache is write-through.
static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	/* Split I/D caches with a write-through dcache: only the icache
	 * can hold stale copies of the on-stack signal trampoline. */
	swift_flush_icache();
}
static void swift_flush_chunk(unsigned long chunk)
{
	/* No per-chunk flush needed on Swift (write-through dcache).
	 * NOTE(review): extraction dropped the braces; the original
	 * body appears to be empty -- confirm. */
}
991 static void swift_flush_tlb_all(void)
993 srmmu_flush_whole_tlb();
994 module_stats
.invall
++;
// Per-mm TLB flush: Swift has no per-context invalidate, so flush
// everything and bump the per-mm counter.
// NOTE(review): extraction appears to have dropped the
// FLUSH_BEGIN(mm)/FLUSH_END guard -- confirm against original.
997 static void swift_flush_tlb_mm(struct mm_struct
*mm
)
1000 srmmu_flush_whole_tlb();
1001 module_stats
.invmm
++;
// Range TLB flush: whole-TLB invalidate again (no range support).
// NOTE(review): the FLUSH_BEGIN(mm)/FLUSH_END guard appears dropped
// by extraction -- confirm against the original srmmu.c.
1005 static void swift_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
1008 srmmu_flush_whole_tlb();
1009 module_stats
.invrnge
++;
// Per-page TLB flush, guarded by FLUSH_BEGIN so mms without a context
// are skipped; still a whole-TLB invalidate on Swift.
// NOTE(review): the closing FLUSH_END appears dropped by extraction.
1013 static void swift_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
)
1015 FLUSH_BEGIN(vma
->vm_mm
)
1016 srmmu_flush_whole_tlb();
1017 module_stats
.invpg
++;
1021 /* The following are all MBUS based SRMMU modules, and therefore could
1022 * be found in a multiprocessor configuration. On the whole, these
1023 * chips seems to be much more touchy about DVMA and page tables
1024 * with respect to cache coherency.
1027 /* Cypress flushes. */
1028 static void cypress_flush_cache_all(void)
1030 volatile unsigned long cypress_sucks
;
1031 unsigned long faddr
, tagval
;
1033 flush_user_windows();
1034 for(faddr
= 0; faddr
< 0x10000; faddr
+= 0x20) {
1035 __asm__
__volatile__("lda [%1 + %2] %3, %0\n\t" :
1037 "r" (faddr
), "r" (0x40000),
1038 "i" (ASI_M_DATAC_TAG
));
1040 /* If modified and valid, kick it. */
1041 if((tagval
& 0x60) == 0x60)
1042 cypress_sucks
= *(unsigned long *)(0xf0020000 + faddr
);
1046 static void cypress_flush_cache_mm(struct mm_struct
*mm
)
1048 register unsigned long a
, b
, c
, d
, e
, f
, g
;
1049 unsigned long flags
, faddr
;
1053 flush_user_windows();
1054 __save_and_cli(flags
);
1055 octx
= srmmu_get_context();
1056 srmmu_set_context(mm
->context
);
1057 a
= 0x20; b
= 0x40; c
= 0x60;
1058 d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
1060 faddr
= (0x10000 - 0x100);
1065 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
1066 "sta %%g0, [%0 + %2] %1\n\t"
1067 "sta %%g0, [%0 + %3] %1\n\t"
1068 "sta %%g0, [%0 + %4] %1\n\t"
1069 "sta %%g0, [%0 + %5] %1\n\t"
1070 "sta %%g0, [%0 + %6] %1\n\t"
1071 "sta %%g0, [%0 + %7] %1\n\t"
1072 "sta %%g0, [%0 + %8] %1\n\t" : :
1073 "r" (faddr
), "i" (ASI_M_FLUSH_CTX
),
1074 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
1075 "r" (e
), "r" (f
), "r" (g
));
1077 srmmu_set_context(octx
);
1078 __restore_flags(flags
);
1082 static void cypress_flush_cache_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
1084 register unsigned long a
, b
, c
, d
, e
, f
, g
;
1085 unsigned long flags
, faddr
;
1089 flush_user_windows();
1090 __save_and_cli(flags
);
1091 octx
= srmmu_get_context();
1092 srmmu_set_context(mm
->context
);
1093 a
= 0x20; b
= 0x40; c
= 0x60;
1094 d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
1096 start
&= SRMMU_PMD_MASK
;
1097 while(start
< end
) {
1098 faddr
= (start
+ (0x10000 - 0x100));
1103 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
1104 "sta %%g0, [%0 + %2] %1\n\t"
1105 "sta %%g0, [%0 + %3] %1\n\t"
1106 "sta %%g0, [%0 + %4] %1\n\t"
1107 "sta %%g0, [%0 + %5] %1\n\t"
1108 "sta %%g0, [%0 + %6] %1\n\t"
1109 "sta %%g0, [%0 + %7] %1\n\t"
1110 "sta %%g0, [%0 + %8] %1\n\t" : :
1112 "i" (ASI_M_FLUSH_SEG
),
1113 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
1114 "r" (e
), "r" (f
), "r" (g
));
1115 } while (faddr
!= start
);
1116 start
+= SRMMU_PMD_SIZE
;
1118 srmmu_set_context(octx
);
1119 __restore_flags(flags
);
1123 static void cypress_flush_cache_page(struct vm_area_struct
*vma
, unsigned long page
)
1125 register unsigned long a
, b
, c
, d
, e
, f
, g
;
1126 struct mm_struct
*mm
= vma
->vm_mm
;
1127 unsigned long flags
, line
;
1131 flush_user_windows();
1132 __save_and_cli(flags
);
1133 octx
= srmmu_get_context();
1134 srmmu_set_context(mm
->context
);
1135 a
= 0x20; b
= 0x40; c
= 0x60;
1136 d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
1139 line
= (page
+ PAGE_SIZE
) - 0x100;
1144 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
1145 "sta %%g0, [%0 + %2] %1\n\t"
1146 "sta %%g0, [%0 + %3] %1\n\t"
1147 "sta %%g0, [%0 + %4] %1\n\t"
1148 "sta %%g0, [%0 + %5] %1\n\t"
1149 "sta %%g0, [%0 + %6] %1\n\t"
1150 "sta %%g0, [%0 + %7] %1\n\t"
1151 "sta %%g0, [%0 + %8] %1\n\t" : :
1153 "i" (ASI_M_FLUSH_PAGE
),
1154 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
1155 "r" (e
), "r" (f
), "r" (g
));
1156 } while(line
!= page
);
1157 srmmu_set_context(octx
);
1158 __restore_flags(flags
);
1162 /* Cypress is copy-back, at least that is how we configure it. */
1163 static void cypress_flush_page_to_ram(unsigned long page
)
1165 register unsigned long a
, b
, c
, d
, e
, f
, g
;
1168 a
= 0x20; b
= 0x40; c
= 0x60; d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
1170 line
= (page
+ PAGE_SIZE
) - 0x100;
1175 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
1176 "sta %%g0, [%0 + %2] %1\n\t"
1177 "sta %%g0, [%0 + %3] %1\n\t"
1178 "sta %%g0, [%0 + %4] %1\n\t"
1179 "sta %%g0, [%0 + %5] %1\n\t"
1180 "sta %%g0, [%0 + %6] %1\n\t"
1181 "sta %%g0, [%0 + %7] %1\n\t"
1182 "sta %%g0, [%0 + %8] %1\n\t" : :
1184 "i" (ASI_M_FLUSH_PAGE
),
1185 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
1186 "r" (e
), "r" (f
), "r" (g
));
1187 } while(line
!= page
);
/* Flush a page-table chunk: on Cypress this is just a page write-back. */
static void cypress_flush_chunk(unsigned long chunk)
{
	cypress_flush_page_to_ram(chunk);
}
/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
	/* Intentionally empty: no flush needed before DMA on this chip. */
}
/* Cypress has a unified (instructions and data) VIPT L2 cache and no
 * separate on-chip icache, so newly written signal trampoline
 * instructions are already visible to instruction fetch — no flush
 * is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	/* Intentionally empty, see comment above. */
}
1208 static void cypress_flush_tlb_all(void)
1210 srmmu_flush_whole_tlb();
1211 module_stats
.invall
++;
/* Flush all TLB entries belonging to MM's hardware context.
 * NOTE(review): the multi-line asm string body was lost in extraction
 * and must be restored from the original source.  What remains shows
 * the operands: the context register is accessed via ASI_M_MMUREGS,
 * the flush/probe is issued via ASI_M_FLUSH_PROBE with argument 0x300,
 * and the per-mm invalidation statistic is bumped afterwards.
 */
1214 static void cypress_flush_tlb_mm(struct mm_struct
*mm
)
1217 __asm__
__volatile__("
1223 : "r" (SRMMU_CTX_REG
), "r" (0x300), "r" (mm
->context
),
1224 "i" (ASI_M_MMUREGS
), "i" (ASI_M_FLUSH_PROBE
)
1226 module_stats
.invmm
++;
/* Flush TLB entries for the pgdir-aligned span covering [start, end)
 * in MM's address space.
 * NOTE(review): part of the multi-line asm string body was lost in
 * extraction and must be restored from the original source.  The
 * visible code rounds `start` down to a pgdir boundary, computes the
 * pgdir-aligned `size`, and loops a flush-probe store (ASI_M_FLUSH_PROBE,
 * start | 0x200) in SRMMU_PGDIR_SIZE steps via ASI_M_MMUREGS, then
 * bumps the range-invalidation statistic.
 */
1230 static void cypress_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
1235 start
&= SRMMU_PGDIR_MASK
;
1236 size
= SRMMU_PGDIR_ALIGN(end
) - start
;
1237 __asm__
__volatile__("
1242 sta %%g0, [%2 + %3] %6
1245 : "r" (SRMMU_CTX_REG
), "r" (mm
->context
), "r" (start
| 0x200),
1246 "r" (size
), "r" (SRMMU_PGDIR_SIZE
), "i" (ASI_M_MMUREGS
),
1247 "i" (ASI_M_FLUSH_PROBE
)
1249 module_stats
.invrnge
++;
/* Flush the TLB entry mapping PAGE in VMA's address space.
 * NOTE(review): the multi-line asm string body was lost in extraction
 * and must be restored from the original source.  The visible operands
 * show a flush-probe store on the page-aligned address within the
 * mm's context (ASI_M_MMUREGS / ASI_M_FLUSH_PROBE), followed by the
 * per-page invalidation statistic.
 */
1253 static void cypress_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
)
1255 struct mm_struct
*mm
= vma
->vm_mm
;
1258 __asm__
__volatile__("
1264 : "r" (SRMMU_CTX_REG
), "r" (mm
->context
), "r" (page
& PAGE_MASK
),
1265 "i" (ASI_M_MMUREGS
), "i" (ASI_M_FLUSH_PROBE
)
1267 module_stats
.invpg
++;
1272 extern void viking_flush_cache_all(void);
1273 extern void viking_flush_cache_mm(struct mm_struct
*mm
);
1274 extern void viking_flush_cache_range(struct mm_struct
*mm
, unsigned long start
,
1276 extern void viking_flush_cache_page(struct vm_area_struct
*vma
,
1277 unsigned long page
);
1278 extern void viking_flush_page_to_ram(unsigned long page
);
1279 extern void viking_flush_page_for_dma(unsigned long page
);
1280 extern void viking_flush_sig_insns(struct mm_struct
*mm
, unsigned long addr
);
1281 extern void viking_flush_page(unsigned long page
);
1282 extern void viking_mxcc_flush_page(unsigned long page
);
1283 extern void viking_flush_chunk(unsigned long chunk
);
1284 extern void viking_c_flush_chunk(unsigned long chunk
);
1285 extern void viking_s_flush_chunk(unsigned long chunk
);
1286 extern void viking_mxcc_flush_chunk(unsigned long chunk
);
1287 extern void viking_flush_tlb_all(void);
1288 extern void viking_flush_tlb_mm(struct mm_struct
*mm
);
1289 extern void viking_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
,
1291 extern void viking_flush_tlb_page(struct vm_area_struct
*vma
,
1292 unsigned long page
);
1293 extern void sun4dsmp_flush_tlb_all(void);
1294 extern void sun4dsmp_flush_tlb_mm(struct mm_struct
*mm
);
1295 extern void sun4dsmp_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
,
1297 extern void sun4dsmp_flush_tlb_page(struct vm_area_struct
*vma
,
1298 unsigned long page
);
1301 extern void hypersparc_flush_cache_all(void);
1302 extern void hypersparc_flush_cache_mm(struct mm_struct
*mm
);
1303 extern void hypersparc_flush_cache_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
);
1304 extern void hypersparc_flush_cache_page(struct vm_area_struct
*vma
, unsigned long page
);
1305 extern void hypersparc_flush_page_to_ram(unsigned long page
);
1306 extern void hypersparc_flush_chunk(unsigned long chunk
);
1307 extern void hypersparc_flush_page_for_dma(unsigned long page
);
1308 extern void hypersparc_flush_sig_insns(struct mm_struct
*mm
, unsigned long insn_addr
);
1309 extern void hypersparc_flush_tlb_all(void);
1310 extern void hypersparc_flush_tlb_mm(struct mm_struct
*mm
);
1311 extern void hypersparc_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
);
1312 extern void hypersparc_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
);
1313 extern void hypersparc_setup_blockops(void);
1315 static void srmmu_set_pte_nocache_hyper(pte_t
*ptep
, pte_t pteval
)
1317 unsigned long page
= ((unsigned long)ptep
) & PAGE_MASK
;
1319 srmmu_set_entry(ptep
, pte_val(pteval
));
1320 hypersparc_flush_page_to_ram(page
);
1323 static void hypersparc_ctxd_set(ctxd_t
*ctxp
, pgd_t
*pgdp
)
1325 srmmu_set_entry((pte_t
*)ctxp
, __pte((SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) pgdp
) >> 4))));
1326 hypersparc_flush_page_to_ram((unsigned long)ctxp
);
1327 hyper_flush_whole_icache();
1330 static void hypersparc_update_rootmmu_dir(struct task_struct
*tsk
, pgd_t
*pgdp
)
1332 unsigned long page
= ((unsigned long) pgdp
) & PAGE_MASK
;
1334 if(pgdp
!= swapper_pg_dir
)
1335 hypersparc_flush_page_to_ram(page
);
1337 if(tsk
->mm
->context
!= NO_CONTEXT
&&
1338 tsk
->mm
->pgd
!= pgdp
) {
1339 flush_cache_mm(tsk
->mm
);
1340 ctxd_set(&srmmu_context_table
[tsk
->mm
->context
], pgdp
);
1341 flush_tlb_mm(tsk
->mm
);
1345 static void viking_update_rootmmu_dir(struct task_struct
*tsk
, pgd_t
*pgdp
)
1347 if(pgdp
!= swapper_pg_dir
)
1348 flush_chunk((unsigned long)pgdp
);
1349 if(tsk
->mm
->context
!= NO_CONTEXT
&&
1350 tsk
->mm
->pgd
!= pgdp
) {
1351 flush_cache_mm(tsk
->mm
);
1352 ctxd_set(&srmmu_context_table
[tsk
->mm
->context
], pgdp
);
1353 flush_tlb_mm(tsk
->mm
);
1357 static void cypress_update_rootmmu_dir(struct task_struct
*tsk
, pgd_t
*pgdp
)
1359 register unsigned long a
, b
, c
, d
, e
, f
, g
;
1360 unsigned long page
= ((unsigned long) pgdp
) & PAGE_MASK
;
1363 if(pgdp
== swapper_pg_dir
)
1366 a
= 0x20; b
= 0x40; c
= 0x60; d
= 0x80; e
= 0xa0; f
= 0xc0; g
= 0xe0;
1368 line
= (page
+ PAGE_SIZE
) - 0x100;
1373 __asm__
__volatile__("sta %%g0, [%0] %1\n\t"
1374 "sta %%g0, [%0 + %2] %1\n\t"
1375 "sta %%g0, [%0 + %3] %1\n\t"
1376 "sta %%g0, [%0 + %4] %1\n\t"
1377 "sta %%g0, [%0 + %5] %1\n\t"
1378 "sta %%g0, [%0 + %6] %1\n\t"
1379 "sta %%g0, [%0 + %7] %1\n\t"
1380 "sta %%g0, [%0 + %8] %1\n\t" : :
1382 "i" (ASI_M_FLUSH_PAGE
),
1383 "r" (a
), "r" (b
), "r" (c
), "r" (d
),
1384 "r" (e
), "r" (f
), "r" (g
));
1385 } while(line
!= page
);
1387 if(tsk
->mm
->context
!= NO_CONTEXT
&&
1388 tsk
->mm
->pgd
!= pgdp
) {
1389 flush_cache_mm(tsk
->mm
);
1390 ctxd_set(&srmmu_context_table
[tsk
->mm
->context
], pgdp
);
1391 flush_tlb_mm(tsk
->mm
);
1395 static void hypersparc_switch_to_context(struct task_struct
*tsk
)
1397 if(tsk
->mm
->context
== NO_CONTEXT
) {
1400 alloc_context(tsk
->mm
);
1401 ctxp
= &srmmu_context_table
[tsk
->mm
->context
];
1402 srmmu_set_entry((pte_t
*)ctxp
, __pte((SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) tsk
->mm
->pgd
) >> 4))));
1403 hypersparc_flush_page_to_ram((unsigned long)ctxp
);
1405 hyper_flush_whole_icache();
1406 srmmu_set_context(tsk
->mm
->context
);
1409 static void hypersparc_init_new_context(struct mm_struct
*mm
)
1415 ctxp
= &srmmu_context_table
[mm
->context
];
1416 srmmu_set_entry((pte_t
*)ctxp
, __pte((SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) mm
->pgd
) >> 4))));
1417 hypersparc_flush_page_to_ram((unsigned long)ctxp
);
1419 if(mm
== current
->mm
) {
1420 hyper_flush_whole_icache();
1421 srmmu_set_context(mm
->context
);
1425 static unsigned long mempool
;
1427 /* NOTE: All of this startup code assumes the low 16mb (approx.) of
1428 * kernel mappings are done with one single contiguous chunk of
1429 * ram. On small ram machines (classics mainly) we only get
1430 * around 8mb mapped for us.
1433 static unsigned long kbpage
;
1435 /* Some dirty hacks to abstract away the painful boot up init. */
1436 static inline unsigned long srmmu_early_paddr(unsigned long vaddr
)
1438 return ((vaddr
- KERNBASE
) + kbpage
);
1441 static inline void srmmu_early_pgd_set(pgd_t
*pgdp
, pmd_t
*pmdp
)
1443 set_pte((pte_t
*)pgdp
, __pte((SRMMU_ET_PTD
| (srmmu_early_paddr((unsigned long) pmdp
) >> 4))));
1446 static inline void srmmu_early_pmd_set(pmd_t
*pmdp
, pte_t
*ptep
)
1448 set_pte((pte_t
*)pmdp
, __pte((SRMMU_ET_PTD
| (srmmu_early_paddr((unsigned long) ptep
) >> 4))));
1451 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd
)
1453 return (((pgd_val(pgd
) & SRMMU_PTD_PMASK
) << 4) - kbpage
) + KERNBASE
;
1456 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd
)
1458 return (((pmd_val(pmd
) & SRMMU_PTD_PMASK
) << 4) - kbpage
) + KERNBASE
;
1461 static inline pmd_t
*srmmu_early_pmd_offset(pgd_t
*dir
, unsigned long address
)
1463 return (pmd_t
*) srmmu_early_pgd_page(*dir
) + ((address
>> SRMMU_PMD_SHIFT
) & (SRMMU_PTRS_PER_PMD
- 1));
1466 static inline pte_t
*srmmu_early_pte_offset(pmd_t
*dir
, unsigned long address
)
1468 return (pte_t
*) srmmu_early_pmd_page(*dir
) + ((address
>> PAGE_SHIFT
) & (SRMMU_PTRS_PER_PTE
- 1));
1471 static inline void srmmu_allocate_ptable_skeleton(unsigned long start
, unsigned long end
)
1477 while(start
< end
) {
1478 pgdp
= srmmu_pgd_offset(init_task
.mm
, start
);
1479 if(srmmu_pgd_none(*pgdp
)) {
1480 pmdp
= sparc_init_alloc(&mempool
, SRMMU_PMD_TABLE_SIZE
);
1481 srmmu_early_pgd_set(pgdp
, pmdp
);
1483 pmdp
= srmmu_early_pmd_offset(pgdp
, start
);
1484 if(srmmu_pmd_none(*pmdp
)) {
1485 ptep
= sparc_init_alloc(&mempool
, SRMMU_PTE_TABLE_SIZE
);
1486 srmmu_early_pmd_set(pmdp
, ptep
);
1488 start
= (start
+ SRMMU_PMD_SIZE
) & SRMMU_PMD_MASK
;
1492 /* This is much cleaner than poking around physical address space
1493 * looking at the prom's page table directly which is what most
1494 * other OS's do. Yuck... this is much better.
1496 __initfunc(void srmmu_inherit_prom_mappings(unsigned long start
,unsigned long end
))
1501 int what
= 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
1502 unsigned long prompte
;
1504 while(start
<= end
) {
1506 break; /* probably wrap around */
1507 if(start
== 0xfef00000)
1508 start
= KADB_DEBUGGER_BEGVM
;
1509 if(!(prompte
= srmmu_hwprobe(start
))) {
1514 /* A red snapper, see what it really is. */
1517 if(!(start
& ~(SRMMU_PMD_MASK
))) {
1518 if(srmmu_hwprobe((start
-PAGE_SIZE
) + SRMMU_PMD_SIZE
) == prompte
)
1522 if(!(start
& ~(SRMMU_PGDIR_MASK
))) {
1523 if(srmmu_hwprobe((start
-PAGE_SIZE
) + SRMMU_PGDIR_SIZE
) ==
1528 pgdp
= srmmu_pgd_offset(init_task
.mm
, start
);
1530 *pgdp
= __pgd(prompte
);
1531 start
+= SRMMU_PGDIR_SIZE
;
1534 if(srmmu_pgd_none(*pgdp
)) {
1535 pmdp
= sparc_init_alloc(&mempool
, SRMMU_PMD_TABLE_SIZE
);
1536 srmmu_early_pgd_set(pgdp
, pmdp
);
1538 pmdp
= srmmu_early_pmd_offset(pgdp
, start
);
1540 *pmdp
= __pmd(prompte
);
1541 start
+= SRMMU_PMD_SIZE
;
1544 if(srmmu_pmd_none(*pmdp
)) {
1545 ptep
= sparc_init_alloc(&mempool
, SRMMU_PTE_TABLE_SIZE
);
1546 srmmu_early_pmd_set(pmdp
, ptep
);
1548 ptep
= srmmu_early_pte_offset(pmdp
, start
);
1549 *ptep
= __pte(prompte
);
1554 #ifdef DEBUG_MAP_KERNEL
1555 #define MKTRACE(foo) prom_printf foo
1557 #define MKTRACE(foo)
1560 static int lots_of_ram __initdata
= 0;
1561 static int srmmu_low_pa __initdata
= 0;
1562 static unsigned long end_of_phys_memory __initdata
= 0;
1564 __initfunc(void srmmu_end_memory(unsigned long memory_size
, unsigned long *end_mem_p
))
1566 unsigned int sum
= 0;
1567 unsigned long last
= 0xff000000;
1570 unsigned long total
= 0;
1573 pa
= srmmu_hwprobe(KERNBASE
+ PAGE_SIZE
);
1574 pa
= (pa
& SRMMU_PTE_PMASK
) << 4;
1575 if (!sp_banks
[0].base_addr
&& pa
== PAGE_SIZE
) {
1576 for(i
= 0; sp_banks
[i
].num_bytes
!= 0; i
++) {
1577 if (sp_banks
[i
].base_addr
+ sp_banks
[i
].num_bytes
> 0x0d000000)
1580 if (!sp_banks
[i
].num_bytes
) {
1582 end_of_phys_memory
= SRMMU_PGDIR_ALIGN(sp_banks
[i
-1].base_addr
+ sp_banks
[i
-1].num_bytes
);
1583 *end_mem_p
= KERNBASE
+ end_of_phys_memory
;
1584 if (sp_banks
[0].num_bytes
>= (6 * 1024 * 1024) || end_of_phys_memory
<= 0x06000000) {
1585 /* Make sure there will be enough memory for the whole mem_map (even if sparse) */
1590 for(i
= 0; sp_banks
[i
].num_bytes
!= 0; i
++) {
1591 pa
= sp_banks
[i
].base_addr
;
1592 first
= (pa
& (~SRMMU_PGDIR_MASK
));
1593 cur
= (sp_banks
[i
].num_bytes
+ first
- SRMMU_PGDIR_SIZE
);
1594 if (cur
< 0) cur
= 0;
1595 if (!first
|| last
!= (pa
& SRMMU_PGDIR_MASK
))
1596 total
+= SRMMU_PGDIR_SIZE
;
1597 sum
+= sp_banks
[i
].num_bytes
;
1599 if (sum
> memory_size
) {
1600 sp_banks
[i
].num_bytes
-=
1601 (sum
- memory_size
);
1602 cur
= (sp_banks
[i
].num_bytes
+ first
- SRMMU_PGDIR_SIZE
);
1603 if (cur
< 0) cur
= 0;
1604 total
+= SRMMU_PGDIR_ALIGN(cur
);
1606 sp_banks
[++i
].base_addr
= 0xdeadbeef;
1607 sp_banks
[i
].num_bytes
= 0;
1611 total
+= SRMMU_PGDIR_ALIGN(cur
);
1612 last
= (sp_banks
[i
].base_addr
+ sp_banks
[i
].num_bytes
- 1) & SRMMU_PGDIR_MASK
;
1614 if (total
<= 0x0d000000)
1615 *end_mem_p
= KERNBASE
+ total
;
1617 *end_mem_p
= 0xfd000000;
1620 end_of_phys_memory
= total
;
1623 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
1625 /* Create a third-level SRMMU 16MB page mapping. */
1626 __initfunc(static void do_large_mapping(unsigned long vaddr
, unsigned long phys_base
))
1628 pgd_t
*pgdp
= srmmu_pgd_offset(init_task
.mm
, vaddr
);
1629 unsigned long big_pte
;
1631 MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr
, phys_base
));
1632 big_pte
= KERNEL_PTE(phys_base
>> 4);
1633 *pgdp
= __pgd(big_pte
);
1636 /* Look in the sp_bank for the given physical page, return the
1637 * index number the entry was found in, or -1 for not found.
1639 static inline int find_in_spbanks(unsigned long phys_page
)
1643 for(entry
= 0; sp_banks
[entry
].num_bytes
; entry
++) {
1644 unsigned long start
= sp_banks
[entry
].base_addr
;
1645 unsigned long end
= start
+ sp_banks
[entry
].num_bytes
;
1647 if((start
<= phys_page
) && (phys_page
< end
))
1653 /* Find an spbank entry not mapped as of yet, TAKEN_VECTOR is an
1654 * array of char's, each member indicating if that spbank is mapped
1657 __initfunc(static int find_free_spbank(char *taken_vector
))
1661 for(entry
= 0; sp_banks
[entry
].num_bytes
; entry
++)
1662 if(!taken_vector
[entry
])
1667 static unsigned long map_spbank_last_pa __initdata
= 0xff000000;
1669 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
1671 __initfunc(static unsigned long map_spbank(unsigned long vbase
, int sp_entry
))
1673 unsigned long pstart
= (sp_banks
[sp_entry
].base_addr
& SRMMU_PGDIR_MASK
);
1674 unsigned long vstart
= (vbase
& SRMMU_PGDIR_MASK
);
1675 unsigned long vend
= SRMMU_PGDIR_ALIGN(vbase
+ sp_banks
[sp_entry
].num_bytes
);
1676 static int srmmu_bank
= 0;
1678 MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry
, vbase
, sp_banks
[sp_entry
].base_addr
, sp_banks
[sp_entry
].num_bytes
));
1679 MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry
, pstart
, vstart
, vend
));
1680 while(vstart
< vend
) {
1681 do_large_mapping(vstart
, pstart
);
1682 vstart
+= SRMMU_PGDIR_SIZE
; pstart
+= SRMMU_PGDIR_SIZE
;
1684 srmmu_map
[srmmu_bank
].vbase
= vbase
;
1685 srmmu_map
[srmmu_bank
].pbase
= sp_banks
[sp_entry
].base_addr
;
1686 srmmu_map
[srmmu_bank
].size
= sp_banks
[sp_entry
].num_bytes
;
1688 map_spbank_last_pa
= pstart
- SRMMU_PGDIR_SIZE
;
/* Print a fatal memory-probe diagnostic and halt via the PROM.
 * Never returns.  MSG is passed through "%s" so that any '%' in a
 * future message cannot be misinterpreted as a format directive
 * (non-literal format strings are a classic printf hazard).
 */
static inline void memprobe_error(char *msg)
{
	prom_printf("%s", msg);
	prom_printf("Halting now...\n");
	prom_halt();
}
1699 /* Assumptions: The bank given to the kernel from the prom/bootloader
1700 * is part of a full bank which is at least 4MB in size and begins at
1701 * 0xf0000000 (ie. KERNBASE).
1703 static inline void map_kernel(void)
1705 unsigned long raw_pte
, physpage
;
1706 unsigned long vaddr
, low_base
;
1707 char etaken
[SPARC_PHYS_BANKS
];
1710 /* Step 1: Clear out sp_banks taken map. */
1711 MKTRACE(("map_kernel: clearing etaken vector... "));
1712 for(entry
= 0; entry
< SPARC_PHYS_BANKS
; entry
++)
1715 low_base
= KERNBASE
;
1717 /* Step 2: Fill in KERNBASE base pgd. Lots of sanity checking here. */
1718 raw_pte
= srmmu_hwprobe(KERNBASE
+ PAGE_SIZE
);
1719 if((raw_pte
& SRMMU_ET_MASK
) != SRMMU_ET_PTE
)
1720 memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
1721 physpage
= (raw_pte
& SRMMU_PTE_PMASK
) << 4;
1722 physpage
-= PAGE_SIZE
;
1723 if(physpage
& ~(SRMMU_PGDIR_MASK
))
1724 memprobe_error("Wheee, kernel not mapped on 16MB physical boundry.\n");
1725 entry
= find_in_spbanks(physpage
);
1726 if(entry
== -1 || (sp_banks
[entry
].base_addr
!= physpage
))
1727 memprobe_error("Kernel mapped in non-existant memory.\n");
1728 MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE
, entry
, sp_banks
[entry
].base_addr
, sp_banks
[entry
].num_bytes
));
1729 if (sp_banks
[entry
].num_bytes
> 0x0d000000) {
1730 unsigned long orig_base
= sp_banks
[entry
].base_addr
;
1731 unsigned long orig_len
= sp_banks
[entry
].num_bytes
;
1732 unsigned long can_map
= 0x0d000000;
1734 /* Map a partial bank in this case, adjust the base
1735 * and the length, but don't mark it used.
1737 sp_banks
[entry
].num_bytes
= can_map
;
1738 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base
, can_map
));
1739 vaddr
= map_spbank(KERNBASE
, entry
);
1740 MKTRACE(("vaddr now %08lx ", vaddr
));
1741 sp_banks
[entry
].base_addr
= orig_base
+ can_map
;
1742 sp_banks
[entry
].num_bytes
= orig_len
- can_map
;
1743 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base
+ can_map
), (orig_len
- can_map
)));
1744 MKTRACE(("map_kernel: skipping first loop\n"));
1747 vaddr
= map_spbank(KERNBASE
, entry
);
1750 /* Step 3: Map what we can above KERNBASE. */
1751 MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr
));
1753 unsigned long bank_size
;
1755 MKTRACE(("map_kernel: ffsp()"));
1756 entry
= find_free_spbank(&etaken
[0]);
1757 bank_size
= sp_banks
[entry
].num_bytes
;
1758 MKTRACE(("<%d> base=%08lx bs=%08lx ", entry
, sp_banks
[entry
].base_addr
, bank_size
));
1762 vaddr
= KERNBASE
+ sp_banks
[entry
].base_addr
;
1763 else if (sp_banks
[entry
].base_addr
& (~SRMMU_PGDIR_MASK
)) {
1764 if (map_spbank_last_pa
== (sp_banks
[entry
].base_addr
& SRMMU_PGDIR_MASK
))
1765 vaddr
-= SRMMU_PGDIR_SIZE
;
1766 vaddr
+= (sp_banks
[entry
].base_addr
& (~SRMMU_PGDIR_MASK
));
1768 if ((vaddr
+ bank_size
- KERNBASE
) > 0x0d000000) {
1769 unsigned long orig_base
= sp_banks
[entry
].base_addr
;
1770 unsigned long orig_len
= sp_banks
[entry
].num_bytes
;
1771 unsigned long can_map
= (0xfd000000 - vaddr
);
1773 /* Map a partial bank in this case, adjust the base
1774 * and the length, but don't mark it used.
1776 sp_banks
[entry
].num_bytes
= can_map
;
1777 MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base
, can_map
));
1778 vaddr
= map_spbank(vaddr
, entry
);
1779 MKTRACE(("vaddr now %08lx ", vaddr
));
1780 sp_banks
[entry
].base_addr
= orig_base
+ can_map
;
1781 sp_banks
[entry
].num_bytes
= orig_len
- can_map
;
1782 MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base
+ can_map
), (orig_len
- can_map
)));
1786 /* Ok, we can map this one, do it. */
1787 MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr
, entry
));
1788 vaddr
= map_spbank(vaddr
, entry
);
1790 MKTRACE(("vaddr now %08lx\n", vaddr
));
1793 /* If not lots_of_ram, assume we did indeed map it all above. */
1796 goto check_and_return
;
1798 /* Step 4: Map the rest (if any) right below KERNBASE. */
1799 MKTRACE(("map_kernel: doing low mappings... "));
1800 low_base
= (KERNBASE
- end_of_phys_memory
+ 0x0d000000);
1801 MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory
, low_base
));
1803 /* Ok, now map 'em. */
1804 MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base
,KERNBASE
));
1805 srmmu_allocate_ptable_skeleton(low_base
, KERNBASE
);
1807 map_spbank_last_pa
= 0xff000000;
1808 MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr
));
1810 unsigned long bank_size
;
1812 entry
= find_free_spbank(&etaken
[0]);
1813 bank_size
= sp_banks
[entry
].num_bytes
;
1814 MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry
, sp_banks
[entry
].base_addr
, bank_size
));
1817 if (sp_banks
[entry
].base_addr
& (~SRMMU_PGDIR_MASK
)) {
1818 if (map_spbank_last_pa
== (sp_banks
[entry
].base_addr
& SRMMU_PGDIR_MASK
))
1819 vaddr
-= SRMMU_PGDIR_SIZE
;
1820 vaddr
+= (sp_banks
[entry
].base_addr
& (~SRMMU_PGDIR_MASK
));
1822 if((vaddr
+ bank_size
) > KERNBASE
)
1823 memprobe_error("Wheee, kernel low mapping overflow.\n");
1824 MKTRACE(("map_spbank(%08lx, %d) ", vaddr
, entry
));
1825 vaddr
= map_spbank(vaddr
, entry
);
1827 MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr
, end_of_phys_memory
));
1832 /* Step 5: Sanity check, make sure we did it all. */
1833 MKTRACE(("check_and_return: "));
1834 for(entry
= 0; sp_banks
[entry
].num_bytes
; entry
++) {
1835 MKTRACE(("e[%d]=%d ", entry
, etaken
[entry
]));
1836 if(!etaken
[entry
]) {
1837 MKTRACE(("oops\n"));
1838 memprobe_error("Some bank did not get mapped.\n");
1841 MKTRACE(("success\n"));
1842 init_task
.mm
->mmap
->vm_start
= page_offset
= low_base
;
1843 stack_top
= page_offset
- PAGE_SIZE
;
1844 BTFIXUPSET_SETHI(page_offset
, low_base
);
1845 BTFIXUPSET_SETHI(stack_top
, page_offset
- PAGE_SIZE
);
1846 BTFIXUPSET_SIMM13(user_ptrs_per_pgd
, page_offset
/ SRMMU_PGDIR_SIZE
);
1849 for(entry
= 0; srmmu_map
[entry
].size
; entry
++) {
1850 printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry
,
1851 srmmu_map
[entry
].vbase
,
1852 srmmu_map
[entry
].vbase
+ srmmu_map
[entry
].size
,
1853 srmmu_map
[entry
].size
,
1854 srmmu_map
[entry
].pbase
);
1858 /* Now setup the p2v/v2p hash tables. */
1859 for(entry
= 0; entry
< SRMMU_HASHSZ
; entry
++)
1860 srmmu_v2p_hash
[entry
] = ((0xff - entry
) << 24);
1861 for(entry
= 0; entry
< SRMMU_HASHSZ
; entry
++)
1862 srmmu_p2v_hash
[entry
] = 0xffffffffUL
;
1863 for(entry
= 0; srmmu_map
[entry
].size
; entry
++) {
1866 for(addr
= srmmu_map
[entry
].vbase
;
1867 addr
< (srmmu_map
[entry
].vbase
+ srmmu_map
[entry
].size
);
1869 srmmu_v2p_hash
[srmmu_ahashfn(addr
)] =
1870 srmmu_map
[entry
].pbase
- srmmu_map
[entry
].vbase
;
1871 for(addr
= srmmu_map
[entry
].pbase
;
1872 addr
< (srmmu_map
[entry
].pbase
+ srmmu_map
[entry
].size
);
1874 srmmu_p2v_hash
[srmmu_ahashfn(addr
)] =
1875 srmmu_map
[entry
].pbase
- srmmu_map
[entry
].vbase
;
1878 BTFIXUPSET_SETHI(page_contig_offset
, page_offset
- (0xfd000000 - KERNBASE
));
1880 phys_mem_contig
= 0;
1882 phys_mem_contig
= 1;
1883 for(entry
= 0; srmmu_map
[entry
].size
; entry
++)
1884 if (srmmu_map
[entry
].pbase
!= srmmu_c_v2p (srmmu_map
[entry
].vbase
)) {
1885 phys_mem_contig
= 0;
1889 if (phys_mem_contig
) {
1890 printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n");
1891 BTFIXUPSET_CALL(pte_page
, srmmu_c_pte_page
, BTFIXUPCALL_NORM
);
1892 BTFIXUPSET_CALL(pmd_page
, srmmu_c_pmd_page
, BTFIXUPCALL_NORM
);
1893 BTFIXUPSET_CALL(pgd_page
, srmmu_c_pgd_page
, BTFIXUPCALL_NORM
);
1894 BTFIXUPSET_CALL(mk_pte
, srmmu_c_mk_pte
, BTFIXUPCALL_NORM
);
1895 BTFIXUPSET_CALL(pte_offset
, srmmu_c_pte_offset
, BTFIXUPCALL_NORM
);
1896 BTFIXUPSET_CALL(pmd_offset
, srmmu_c_pmd_offset
, BTFIXUPCALL_NORM
);
1897 if (BTFIXUPVAL_CALL(ctxd_set
) == (unsigned long)srmmu_ctxd_set
)
1898 BTFIXUPSET_CALL(ctxd_set
, srmmu_c_ctxd_set
, BTFIXUPCALL_NORM
);
1899 BTFIXUPSET_CALL(pgd_set
, srmmu_c_pgd_set
, BTFIXUPCALL_NORM
);
1900 BTFIXUPSET_CALL(pmd_set
, srmmu_c_pmd_set
, BTFIXUPCALL_NORM
);
1901 BTFIXUPSET_CALL(mmu_v2p
, srmmu_c_v2p
, BTFIXUPCALL_NORM
);
1902 BTFIXUPSET_CALL(mmu_p2v
, srmmu_c_p2v
, BTFIXUPCALL_NORM
);
1903 if (BTFIXUPVAL_CALL(flush_chunk
) == (unsigned long)viking_flush_chunk
)
1904 BTFIXUPSET_CALL(flush_chunk
, viking_c_flush_chunk
, BTFIXUPCALL_NORM
);
1905 } else if (srmmu_low_pa
) {
1906 printk ("SRMMU: Compact physical memory. Using strightforward VA<->PA translations.\n");
1907 BTFIXUPSET_CALL(pte_page
, srmmu_s_pte_page
, BTFIXUPCALL_NORM
);
1908 BTFIXUPSET_CALL(pmd_page
, srmmu_s_pmd_page
, BTFIXUPCALL_NORM
);
1909 BTFIXUPSET_CALL(pgd_page
, srmmu_s_pgd_page
, BTFIXUPCALL_NORM
);
1910 BTFIXUPSET_CALL(mk_pte
, srmmu_s_mk_pte
, BTFIXUPCALL_NORM
);
1911 BTFIXUPSET_CALL(pte_offset
, srmmu_s_pte_offset
, BTFIXUPCALL_NORM
);
1912 BTFIXUPSET_CALL(pmd_offset
, srmmu_s_pmd_offset
, BTFIXUPCALL_NORM
);
1913 if (BTFIXUPVAL_CALL(ctxd_set
) == (unsigned long)srmmu_ctxd_set
)
1914 BTFIXUPSET_CALL(ctxd_set
, srmmu_s_ctxd_set
, BTFIXUPCALL_NORM
);
1915 BTFIXUPSET_CALL(pgd_set
, srmmu_s_pgd_set
, BTFIXUPCALL_NORM
);
1916 BTFIXUPSET_CALL(pmd_set
, srmmu_s_pmd_set
, BTFIXUPCALL_NORM
);
1917 BTFIXUPSET_CALL(mmu_v2p
, srmmu_s_v2p
, BTFIXUPCALL_NORM
);
1918 BTFIXUPSET_CALL(mmu_p2v
, srmmu_s_p2v
, BTFIXUPCALL_NORM
);
1919 if (BTFIXUPVAL_CALL(flush_chunk
) == (unsigned long)viking_flush_chunk
)
1920 BTFIXUPSET_CALL(flush_chunk
, viking_s_flush_chunk
, BTFIXUPCALL_NORM
);
1924 return; /* SUCCESS! */
1927 /* Paging initialization on the Sparc Reference MMU. */
1928 extern unsigned long free_area_init(unsigned long, unsigned long);
1929 extern unsigned long sparc_context_init(unsigned long, int);
1931 extern int physmem_mapped_contig
;
1932 extern int linux_num_cpus
;
1934 void (*poke_srmmu
)(void) __initdata
= NULL
;
1936 __initfunc(unsigned long srmmu_paging_init(unsigned long start_mem
, unsigned long end_mem
))
1938 unsigned long ptables_start
;
1942 sparc_iobase_vaddr
= 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
1943 physmem_mapped_contig
= 0; /* for init.c:taint_real_pages() */
1945 if (sparc_cpu_model
== sun4d
)
1946 num_contexts
= 65536; /* We know it is Viking */
1948 /* Find the number of contexts on the srmmu. */
1949 cpunode
= prom_getchild(prom_root_node
);
1951 while((cpunode
= prom_getsibling(cpunode
)) != 0) {
1952 prom_getstring(cpunode
, "device_type", node_str
, sizeof(node_str
));
1953 if(!strcmp(node_str
, "cpu")) {
1954 num_contexts
= prom_getintdefault(cpunode
, "mmu-nctx", 0x8);
1961 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1965 ptables_start
= mempool
= PAGE_ALIGN(start_mem
);
1966 memset(swapper_pg_dir
, 0, PAGE_SIZE
);
1967 kbpage
= srmmu_hwprobe(KERNBASE
+ PAGE_SIZE
);
1968 kbpage
= (kbpage
& SRMMU_PTE_PMASK
) << 4;
1969 kbpage
-= PAGE_SIZE
;
1971 srmmu_allocate_ptable_skeleton(KERNBASE
, end_mem
);
1973 srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr
, IOBASE_END
);
1974 srmmu_allocate_ptable_skeleton(DVMA_VADDR
, DVMA_END
);
1977 mempool
= PAGE_ALIGN(mempool
);
1978 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM
-PAGE_SIZE
));
1980 srmmu_context_table
= sparc_init_alloc(&mempool
, num_contexts
*sizeof(ctxd_t
));
1981 srmmu_ctx_table_phys
= (ctxd_t
*) srmmu_v2p((unsigned long) srmmu_context_table
);
1982 for(i
= 0; i
< num_contexts
; i
++)
1983 ctxd_set(&srmmu_context_table
[i
], swapper_pg_dir
);
1985 start_mem
= PAGE_ALIGN(mempool
);
1988 if(BTFIXUPVAL_CALL(flush_page_for_dma
) == (unsigned long)viking_flush_page
) {
1989 unsigned long start
= ptables_start
;
1990 unsigned long end
= start_mem
;
1992 while(start
< end
) {
1993 viking_flush_page(start
);
1997 srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys
);
2001 start_mem
= sparc_context_init(start_mem
, num_contexts
);
2002 start_mem
= free_area_init(start_mem
, end_mem
);
2004 return PAGE_ALIGN(start_mem
);
2007 static int srmmu_mmu_info(char *buf
)
2017 module_stats
.invall
,
2019 module_stats
.invrnge
,
2025 static void srmmu_update_mmu_cache(struct vm_area_struct
* vma
, unsigned long address
, pte_t pte
)
2029 static void srmmu_destroy_context(struct mm_struct
*mm
)
2031 if(mm
->context
!= NO_CONTEXT
&& atomic_read(&mm
->count
) == 1) {
2032 /* XXX This could be drastically improved.
2033 * XXX We are only called from __exit_mm and it just did
2034 * XXX cache/tlb mm flush and right after this will (re-)
2035 * XXX SET_PAGE_DIR to swapper_pg_dir. -DaveM
2038 ctxd_set(&srmmu_context_table
[mm
->context
], swapper_pg_dir
);
2040 free_context(mm
->context
);
2041 mm
->context
= NO_CONTEXT
;
2045 static void srmmu_vac_update_mmu_cache(struct vm_area_struct
* vma
,
2046 unsigned long address
, pte_t pte
)
2048 if((vma
->vm_flags
& (VM_WRITE
|VM_SHARED
)) == (VM_WRITE
|VM_SHARED
)) {
2049 struct vm_area_struct
*vmaring
;
2051 struct inode
*inode
;
2052 unsigned long flags
, offset
, vaddr
, start
;
2053 int alias_found
= 0;
2058 __save_and_cli(flags
);
2060 file
= vma
->vm_file
;
2063 inode
= file
->f_dentry
->d_inode
;
2064 offset
= (address
& PAGE_MASK
) - vma
->vm_start
;
2065 vmaring
= inode
->i_mmap
;
2067 /* Do not mistake ourselves as another mapping. */
2071 vaddr
= vmaring
->vm_start
+ offset
;
2072 if ((vaddr
^ address
) & vac_badbits
) {
2074 start
= vmaring
->vm_start
;
2075 while (start
< vmaring
->vm_end
) {
2076 pgdp
= srmmu_pgd_offset(vmaring
->vm_mm
, start
);
2077 if(!pgdp
) goto next
;
2078 pmdp
= srmmu_pmd_offset(pgdp
, start
);
2079 if(!pmdp
) goto next
;
2080 ptep
= srmmu_pte_offset(pmdp
, start
);
2081 if(!ptep
) goto next
;
2083 if((pte_val(*ptep
) & SRMMU_ET_MASK
) == SRMMU_VALID
) {
2085 printk("Fixing USER/USER alias [%ld:%08lx]\n",
2086 vmaring
->vm_mm
->context
, start
);
2088 flush_cache_page(vmaring
, start
);
2089 set_pte(ptep
, __pte((pte_val(*ptep
) &
2091 flush_tlb_page(vmaring
, start
);
2097 } while ((vmaring
= vmaring
->vm_next_share
) != NULL
);
2099 if(alias_found
&& ((pte_val(pte
) & SRMMU_CACHE
) != 0)) {
2100 pgdp
= srmmu_pgd_offset(vma
->vm_mm
, address
);
2101 pmdp
= srmmu_pmd_offset(pgdp
, address
);
2102 ptep
= srmmu_pte_offset(pmdp
, address
);
2103 flush_cache_page(vma
, address
);
2104 set_pte(ptep
, __pte((pte_val(*ptep
) & ~SRMMU_CACHE
)));
2105 flush_tlb_page(vma
, address
);
2108 __restore_flags(flags
);
2112 static void hypersparc_destroy_context(struct mm_struct
*mm
)
2114 if(mm
->context
!= NO_CONTEXT
&& atomic_read(&mm
->count
) == 1) {
2117 /* HyperSparc is copy-back, any data for this
2118 * process in a modified cache line is stale
2119 * and must be written back to main memory now
2120 * else we eat shit later big time.
2124 ctxp
= &srmmu_context_table
[mm
->context
];
2125 srmmu_set_entry((pte_t
*)ctxp
, __pte((SRMMU_ET_PTD
| (srmmu_v2p((unsigned long) swapper_pg_dir
) >> 4))));
2126 hypersparc_flush_page_to_ram((unsigned long)ctxp
);
2129 free_context(mm
->context
);
2130 mm
->context
= NO_CONTEXT
;
2134 /* Init various srmmu chip types. */
2135 __initfunc(static void srmmu_is_bad(void))
2137 prom_printf("Could not determine SRMMU chip type.\n");
2141 __initfunc(static void init_vac_layout(void))
2143 int nd
, cache_lines
;
2147 unsigned long max_size
= 0;
2148 unsigned long min_line_size
= 0x10000000;
2151 nd
= prom_getchild(prom_root_node
);
2152 while((nd
= prom_getsibling(nd
)) != 0) {
2153 prom_getstring(nd
, "device_type", node_str
, sizeof(node_str
));
2154 if(!strcmp(node_str
, "cpu")) {
2155 vac_line_size
= prom_getint(nd
, "cache-line-size");
2156 if (vac_line_size
== -1) {
2157 prom_printf("can't determine cache-line-size, "
2161 cache_lines
= prom_getint(nd
, "cache-nlines");
2162 if (cache_lines
== -1) {
2163 prom_printf("can't determine cache-nlines, halting.\n");
2167 vac_cache_size
= cache_lines
* vac_line_size
;
2168 vac_badbits
= (vac_cache_size
- 1) & PAGE_MASK
;
2170 if(vac_cache_size
> max_size
)
2171 max_size
= vac_cache_size
;
2172 if(vac_line_size
< min_line_size
)
2173 min_line_size
= vac_line_size
;
2175 if(cpu
== smp_num_cpus
)
2183 prom_printf("No CPU nodes found, halting.\n");
2187 vac_cache_size
= max_size
;
2188 vac_line_size
= min_line_size
;
2189 vac_badbits
= (vac_cache_size
- 1) & PAGE_MASK
;
2191 printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
2192 (int)vac_cache_size
, (int)vac_line_size
);
2195 __initfunc(static void poke_hypersparc(void))
2197 volatile unsigned long clear
;
2198 unsigned long mreg
= srmmu_get_mmureg();
2200 hyper_flush_unconditional_combined();
2202 mreg
&= ~(HYPERSPARC_CWENABLE
);
2203 mreg
|= (HYPERSPARC_CENABLE
| HYPERSPARC_WBENABLE
);
2204 mreg
|= (HYPERSPARC_CMODE
);
2206 srmmu_set_mmureg(mreg
);
2208 #if 0 /* I think this is bad news... -DaveM */
2209 hyper_clear_all_tags();
2212 put_ross_icr(HYPERSPARC_ICCR_FTD
| HYPERSPARC_ICCR_ICE
);
2213 hyper_flush_whole_icache();
2214 clear
= srmmu_get_faddr();
2215 clear
= srmmu_get_fstatus();
2218 __initfunc(static void init_hypersparc(void))
2220 srmmu_name
= "ROSS HyperSparc";
2224 BTFIXUPSET_CALL(set_pte
, srmmu_set_pte_nocache_hyper
, BTFIXUPCALL_NORM
);
2225 BTFIXUPSET_CALL(pte_clear
, srmmu_pte_clear
, BTFIXUPCALL_NORM
);
2226 BTFIXUPSET_CALL(pmd_clear
, srmmu_pmd_clear
, BTFIXUPCALL_NORM
);
2227 BTFIXUPSET_CALL(pgd_clear
, srmmu_pgd_clear
, BTFIXUPCALL_NORM
);
2228 BTFIXUPSET_CALL(flush_cache_all
, hypersparc_flush_cache_all
, BTFIXUPCALL_NORM
);
2229 BTFIXUPSET_CALL(flush_cache_mm
, hypersparc_flush_cache_mm
, BTFIXUPCALL_NORM
);
2230 BTFIXUPSET_CALL(flush_cache_range
, hypersparc_flush_cache_range
, BTFIXUPCALL_NORM
);
2231 BTFIXUPSET_CALL(flush_cache_page
, hypersparc_flush_cache_page
, BTFIXUPCALL_NORM
);
2233 BTFIXUPSET_CALL(flush_tlb_all
, hypersparc_flush_tlb_all
, BTFIXUPCALL_NORM
);
2234 BTFIXUPSET_CALL(flush_tlb_mm
, hypersparc_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2235 BTFIXUPSET_CALL(flush_tlb_range
, hypersparc_flush_tlb_range
, BTFIXUPCALL_NORM
);
2236 BTFIXUPSET_CALL(flush_tlb_page
, hypersparc_flush_tlb_page
, BTFIXUPCALL_NORM
);
2238 BTFIXUPSET_CALL(flush_page_to_ram
, hypersparc_flush_page_to_ram
, BTFIXUPCALL_NORM
);
2239 BTFIXUPSET_CALL(flush_sig_insns
, hypersparc_flush_sig_insns
, BTFIXUPCALL_NORM
);
2240 BTFIXUPSET_CALL(flush_page_for_dma
, hypersparc_flush_page_for_dma
, BTFIXUPCALL_NOP
);
2242 BTFIXUPSET_CALL(flush_chunk
, hypersparc_flush_chunk
, BTFIXUPCALL_NORM
); /* local flush _only_ */
2244 BTFIXUPSET_CALL(ctxd_set
, hypersparc_ctxd_set
, BTFIXUPCALL_NORM
);
2245 BTFIXUPSET_CALL(switch_to_context
, hypersparc_switch_to_context
, BTFIXUPCALL_NORM
);
2246 BTFIXUPSET_CALL(init_new_context
, hypersparc_init_new_context
, BTFIXUPCALL_NORM
);
2247 BTFIXUPSET_CALL(destroy_context
, hypersparc_destroy_context
, BTFIXUPCALL_NORM
);
2248 BTFIXUPSET_CALL(update_mmu_cache
, srmmu_vac_update_mmu_cache
, BTFIXUPCALL_NORM
);
2249 BTFIXUPSET_CALL(sparc_update_rootmmu_dir
, hypersparc_update_rootmmu_dir
, BTFIXUPCALL_NORM
);
2250 poke_srmmu
= poke_hypersparc
;
2252 hypersparc_setup_blockops();
2255 __initfunc(static void poke_cypress(void))
2257 unsigned long mreg
= srmmu_get_mmureg();
2258 unsigned long faddr
, tagval
;
2259 volatile unsigned long cypress_sucks
;
2260 volatile unsigned long clear
;
2262 clear
= srmmu_get_faddr();
2263 clear
= srmmu_get_fstatus();
2265 if (!(mreg
& CYPRESS_CENABLE
)) {
2266 for(faddr
= 0x0; faddr
< 0x10000; faddr
+= 20) {
2267 __asm__
__volatile__("sta %%g0, [%0 + %1] %2\n\t"
2268 "sta %%g0, [%0] %2\n\t" : :
2269 "r" (faddr
), "r" (0x40000),
2270 "i" (ASI_M_DATAC_TAG
));
2273 for(faddr
= 0; faddr
< 0x10000; faddr
+= 0x20) {
2274 __asm__
__volatile__("lda [%1 + %2] %3, %0\n\t" :
2276 "r" (faddr
), "r" (0x40000),
2277 "i" (ASI_M_DATAC_TAG
));
2279 /* If modified and valid, kick it. */
2280 if((tagval
& 0x60) == 0x60)
2281 cypress_sucks
= *(unsigned long *)
2282 (0xf0020000 + faddr
);
2286 /* And one more, for our good neighbor, Mr. Broken Cypress. */
2287 clear
= srmmu_get_faddr();
2288 clear
= srmmu_get_fstatus();
2290 mreg
|= (CYPRESS_CENABLE
| CYPRESS_CMODE
);
2291 srmmu_set_mmureg(mreg
);
2294 __initfunc(static void init_cypress_common(void))
2298 BTFIXUPSET_CALL(set_pte
, srmmu_set_pte_nocache_cypress
, BTFIXUPCALL_NORM
);
2299 BTFIXUPSET_CALL(pte_clear
, srmmu_pte_clear
, BTFIXUPCALL_NORM
);
2300 BTFIXUPSET_CALL(pmd_clear
, srmmu_pmd_clear
, BTFIXUPCALL_NORM
);
2301 BTFIXUPSET_CALL(pgd_clear
, srmmu_pgd_clear
, BTFIXUPCALL_NORM
);
2302 BTFIXUPSET_CALL(flush_cache_all
, cypress_flush_cache_all
, BTFIXUPCALL_NORM
);
2303 BTFIXUPSET_CALL(flush_cache_mm
, cypress_flush_cache_mm
, BTFIXUPCALL_NORM
);
2304 BTFIXUPSET_CALL(flush_cache_range
, cypress_flush_cache_range
, BTFIXUPCALL_NORM
);
2305 BTFIXUPSET_CALL(flush_cache_page
, cypress_flush_cache_page
, BTFIXUPCALL_NORM
);
2307 BTFIXUPSET_CALL(flush_tlb_all
, cypress_flush_tlb_all
, BTFIXUPCALL_NORM
);
2308 BTFIXUPSET_CALL(flush_tlb_mm
, cypress_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2309 BTFIXUPSET_CALL(flush_tlb_page
, cypress_flush_tlb_page
, BTFIXUPCALL_NORM
);
2310 BTFIXUPSET_CALL(flush_tlb_range
, cypress_flush_tlb_range
, BTFIXUPCALL_NORM
);
2312 BTFIXUPSET_CALL(flush_chunk
, cypress_flush_chunk
, BTFIXUPCALL_NORM
); /* local flush _only_ */
2314 BTFIXUPSET_CALL(flush_page_to_ram
, cypress_flush_page_to_ram
, BTFIXUPCALL_NORM
);
2315 BTFIXUPSET_CALL(flush_sig_insns
, cypress_flush_sig_insns
, BTFIXUPCALL_NOP
);
2316 BTFIXUPSET_CALL(flush_page_for_dma
, cypress_flush_page_for_dma
, BTFIXUPCALL_NOP
);
2317 BTFIXUPSET_CALL(sparc_update_rootmmu_dir
, cypress_update_rootmmu_dir
, BTFIXUPCALL_NORM
);
2319 BTFIXUPSET_CALL(update_mmu_cache
, srmmu_vac_update_mmu_cache
, BTFIXUPCALL_NORM
);
2320 poke_srmmu
= poke_cypress
;
2323 __initfunc(static void init_cypress_604(void))
2325 srmmu_name
= "ROSS Cypress-604(UP)";
2326 srmmu_modtype
= Cypress
;
2327 init_cypress_common();
2330 __initfunc(static void init_cypress_605(unsigned long mrev
))
2332 srmmu_name
= "ROSS Cypress-605(MP)";
2334 srmmu_modtype
= Cypress_vE
;
2335 hwbug_bitmask
|= HWBUG_COPYBACK_BROKEN
;
2338 srmmu_modtype
= Cypress_vD
;
2339 hwbug_bitmask
|= HWBUG_ASIFLUSH_BROKEN
;
2341 srmmu_modtype
= Cypress
;
2344 init_cypress_common();
2347 __initfunc(static void poke_swift(void))
2349 unsigned long mreg
= srmmu_get_mmureg();
2351 /* Clear any crap from the cache or else... */
2352 swift_idflash_clear();
2353 mreg
|= (SWIFT_IE
| SWIFT_DE
); /* I & D caches on */
2355 /* The Swift branch folding logic is completely broken. At
2356 * trap time, if things are just right, if can mistakenly
2357 * think that a trap is coming from kernel mode when in fact
2358 * it is coming from user mode (it mis-executes the branch in
2359 * the trap code). So you see things like crashme completely
2360 * hosing your machine which is completely unacceptable. Turn
2361 * this shit off... nice job Fujitsu.
2363 mreg
&= ~(SWIFT_BF
);
2364 srmmu_set_mmureg(mreg
);
2367 #define SWIFT_MASKID_ADDR 0x10003018
2368 __initfunc(static void init_swift(void))
2370 unsigned long swift_rev
;
2372 __asm__
__volatile__("lda [%1] %2, %0\n\t"
2373 "srl %0, 0x18, %0\n\t" :
2375 "r" (SWIFT_MASKID_ADDR
), "i" (ASI_M_BYPASS
));
2376 srmmu_name
= "Fujitsu Swift";
2382 srmmu_modtype
= Swift_lots_o_bugs
;
2383 hwbug_bitmask
|= (HWBUG_KERN_ACCBROKEN
| HWBUG_KERN_CBITBROKEN
);
2384 /* Gee george, I wonder why Sun is so hush hush about
2385 * this hardware bug... really braindamage stuff going
2386 * on here. However I think we can find a way to avoid
2387 * all of the workaround overhead under Linux. Basically,
2388 * any page fault can cause kernel pages to become user
2389 * accessible (the mmu gets confused and clears some of
2390 * the ACC bits in kernel ptes). Aha, sounds pretty
2391 * horrible eh? But wait, after extensive testing it appears
2392 * that if you use pgd_t level large kernel pte's (like the
2393 * 4MB pages on the Pentium) the bug does not get tripped
2394 * at all. This avoids almost all of the major overhead.
2395 * Welcome to a world where your vendor tells you to,
2396 * "apply this kernel patch" instead of "sorry for the
2397 * broken hardware, send it back and we'll give you
2398 * properly functioning parts"
2403 srmmu_modtype
= Swift_bad_c
;
2404 hwbug_bitmask
|= HWBUG_KERN_CBITBROKEN
;
2405 /* You see Sun allude to this hardware bug but never
2406 * admit things directly, they'll say things like,
2407 * "the Swift chip cache problems" or similar.
2411 srmmu_modtype
= Swift_ok
;
2415 BTFIXUPSET_CALL(flush_cache_all
, swift_flush_cache_all
, BTFIXUPCALL_NORM
);
2416 BTFIXUPSET_CALL(flush_cache_mm
, swift_flush_cache_mm
, BTFIXUPCALL_NORM
);
2417 BTFIXUPSET_CALL(flush_cache_page
, swift_flush_cache_page
, BTFIXUPCALL_NORM
);
2418 BTFIXUPSET_CALL(flush_cache_range
, swift_flush_cache_range
, BTFIXUPCALL_NORM
);
2420 BTFIXUPSET_CALL(flush_chunk
, swift_flush_chunk
, BTFIXUPCALL_NOP
); /* local flush _only_ */
2422 BTFIXUPSET_CALL(flush_tlb_all
, swift_flush_tlb_all
, BTFIXUPCALL_NORM
);
2423 BTFIXUPSET_CALL(flush_tlb_mm
, swift_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2424 BTFIXUPSET_CALL(flush_tlb_page
, swift_flush_tlb_page
, BTFIXUPCALL_NORM
);
2425 BTFIXUPSET_CALL(flush_tlb_range
, swift_flush_tlb_range
, BTFIXUPCALL_NORM
);
2427 BTFIXUPSET_CALL(flush_page_to_ram
, swift_flush_page_to_ram
, BTFIXUPCALL_NOP
);
2428 BTFIXUPSET_CALL(flush_sig_insns
, swift_flush_sig_insns
, BTFIXUPCALL_NORM
);
2429 BTFIXUPSET_CALL(flush_page_for_dma
, swift_flush_page_for_dma
, BTFIXUPCALL_NORM
);
2431 BTFIXUPSET_CALL(update_mmu_cache
, swift_update_mmu_cache
, BTFIXUPCALL_NORM
);
2433 /* Are you now convinced that the Swift is one of the
2434 * biggest VLSI abortions of all time? Bravo Fujitsu!
2435 * Fujitsu, the !#?!%$'d up processor people. I bet if
2436 * you examined the microcode of the Swift you'd find
2437 * XXX's all over the place.
2439 poke_srmmu
= poke_swift
;
/* Flush the entire TurboSparc I+D cache (and user register windows). */
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}
2448 static void turbosparc_flush_cache_mm(struct mm_struct
*mm
)
2451 flush_user_windows();
2452 turbosparc_idflash_clear();
2456 static void turbosparc_flush_cache_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
2459 flush_user_windows();
2460 turbosparc_idflash_clear();
2464 static void turbosparc_flush_cache_page(struct vm_area_struct
*vma
, unsigned long page
)
2466 FLUSH_BEGIN(vma
->vm_mm
)
2467 flush_user_windows();
2468 if (vma
->vm_flags
& VM_EXEC
)
2469 turbosparc_flush_icache();
2470 turbosparc_flush_dcache();
/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	/* Only flush if the page is actually mapped by the MMU. */
	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}
/* No action needed to make signal trampoline insns visible on TurboSparc. */
static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}
/* Before DMA, write back/invalidate the data cache. */
static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}
/* Chunk flush is a no-op on TurboSparc. */
static void turbosparc_flush_chunk(unsigned long chunk)
{
}
2499 static void turbosparc_flush_tlb_all(void)
2501 srmmu_flush_whole_tlb();
2502 module_stats
.invall
++;
2505 static void turbosparc_flush_tlb_mm(struct mm_struct
*mm
)
2508 srmmu_flush_whole_tlb();
2509 module_stats
.invmm
++;
2513 static void turbosparc_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
)
2516 srmmu_flush_whole_tlb();
2517 module_stats
.invrnge
++;
2521 static void turbosparc_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
)
2523 FLUSH_BEGIN(vma
->vm_mm
)
2524 srmmu_flush_whole_tlb();
2525 module_stats
.invpg
++;
2530 __initfunc(static void poke_turbosparc(void))
2532 unsigned long mreg
= srmmu_get_mmureg();
2533 unsigned long ccreg
;
2535 /* Clear any crap from the cache or else... */
2536 turbosparc_flush_cache_all();
2537 mreg
&= ~(TURBOSPARC_ICENABLE
| TURBOSPARC_DCENABLE
); /* Temporarily disable I & D caches */
2538 mreg
&= ~(TURBOSPARC_PCENABLE
); /* Don't check parity */
2539 srmmu_set_mmureg(mreg
);
2541 ccreg
= turbosparc_get_ccreg();
2543 #ifdef TURBOSPARC_WRITEBACK
2544 ccreg
|= (TURBOSPARC_SNENABLE
); /* Do DVMA snooping in Dcache */
2545 ccreg
&= ~(TURBOSPARC_uS2
| TURBOSPARC_WTENABLE
);
2546 /* Write-back D-cache, emulate VLSI
2547 * abortion number three, not number one */
2549 /* For now let's play safe, optimize later */
2550 ccreg
|= (TURBOSPARC_SNENABLE
| TURBOSPARC_WTENABLE
);
2551 /* Do DVMA snooping in Dcache, Write-thru D-cache */
2552 ccreg
&= ~(TURBOSPARC_uS2
);
2553 /* Emulate VLSI abortion number three, not number one */
2556 switch (ccreg
& 7) {
2557 case 0: /* No SE cache */
2558 case 7: /* Test mode */
2561 ccreg
|= (TURBOSPARC_SCENABLE
);
2563 turbosparc_set_ccreg (ccreg
);
2565 mreg
|= (TURBOSPARC_ICENABLE
| TURBOSPARC_DCENABLE
); /* I & D caches on */
2566 mreg
|= (TURBOSPARC_ICSNOOP
); /* Icache snooping on */
2567 srmmu_set_mmureg(mreg
);
2570 __initfunc(static void init_turbosparc(void))
2572 srmmu_name
= "Fujitsu TurboSparc";
2573 srmmu_modtype
= TurboSparc
;
2575 BTFIXUPSET_CALL(flush_cache_all
, turbosparc_flush_cache_all
, BTFIXUPCALL_NORM
);
2576 BTFIXUPSET_CALL(flush_cache_mm
, turbosparc_flush_cache_mm
, BTFIXUPCALL_NORM
);
2577 BTFIXUPSET_CALL(flush_cache_page
, turbosparc_flush_cache_page
, BTFIXUPCALL_NORM
);
2578 BTFIXUPSET_CALL(flush_cache_range
, turbosparc_flush_cache_range
, BTFIXUPCALL_NORM
);
2580 BTFIXUPSET_CALL(flush_tlb_all
, turbosparc_flush_tlb_all
, BTFIXUPCALL_NORM
);
2581 BTFIXUPSET_CALL(flush_tlb_mm
, turbosparc_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2582 BTFIXUPSET_CALL(flush_tlb_page
, turbosparc_flush_tlb_page
, BTFIXUPCALL_NORM
);
2583 BTFIXUPSET_CALL(flush_tlb_range
, turbosparc_flush_tlb_range
, BTFIXUPCALL_NORM
);
2585 BTFIXUPSET_CALL(flush_page_to_ram
, turbosparc_flush_page_to_ram
, BTFIXUPCALL_NORM
);
2586 BTFIXUPSET_CALL(flush_chunk
, turbosparc_flush_chunk
, BTFIXUPCALL_NORM
);
2588 BTFIXUPSET_CALL(flush_sig_insns
, turbosparc_flush_sig_insns
, BTFIXUPCALL_NOP
);
2589 BTFIXUPSET_CALL(flush_page_for_dma
, turbosparc_flush_page_for_dma
, BTFIXUPCALL_NOP
);
2591 poke_srmmu
= poke_turbosparc
;
2594 __initfunc(static void poke_tsunami(void))
2596 unsigned long mreg
= srmmu_get_mmureg();
2598 tsunami_flush_icache();
2599 tsunami_flush_dcache();
2600 mreg
&= ~TSUNAMI_ITD
;
2601 mreg
|= (TSUNAMI_IENAB
| TSUNAMI_DENAB
);
2602 srmmu_set_mmureg(mreg
);
2605 __initfunc(static void init_tsunami(void))
2607 /* Tsunami's pretty sane, Sun and TI actually got it
2608 * somewhat right this time. Fujitsu should have
2609 * taken some lessons from them.
2612 srmmu_name
= "TI Tsunami";
2613 srmmu_modtype
= Tsunami
;
2615 BTFIXUPSET_CALL(flush_cache_all
, tsunami_flush_cache_all
, BTFIXUPCALL_NORM
);
2616 BTFIXUPSET_CALL(flush_cache_mm
, tsunami_flush_cache_mm
, BTFIXUPCALL_NORM
);
2617 BTFIXUPSET_CALL(flush_cache_page
, tsunami_flush_cache_page
, BTFIXUPCALL_NORM
);
2618 BTFIXUPSET_CALL(flush_cache_range
, tsunami_flush_cache_range
, BTFIXUPCALL_NORM
);
2620 BTFIXUPSET_CALL(flush_chunk
, tsunami_flush_chunk
, BTFIXUPCALL_NOP
); /* local flush _only_ */
2622 BTFIXUPSET_CALL(flush_tlb_all
, tsunami_flush_tlb_all
, BTFIXUPCALL_NORM
);
2623 BTFIXUPSET_CALL(flush_tlb_mm
, tsunami_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2624 BTFIXUPSET_CALL(flush_tlb_page
, tsunami_flush_tlb_page
, BTFIXUPCALL_NORM
);
2625 BTFIXUPSET_CALL(flush_tlb_range
, tsunami_flush_tlb_range
, BTFIXUPCALL_NORM
);
2627 BTFIXUPSET_CALL(flush_page_to_ram
, tsunami_flush_page_to_ram
, BTFIXUPCALL_NOP
);
2628 BTFIXUPSET_CALL(flush_sig_insns
, tsunami_flush_sig_insns
, BTFIXUPCALL_NORM
);
2629 BTFIXUPSET_CALL(flush_page_for_dma
, tsunami_flush_page_for_dma
, BTFIXUPCALL_NORM
);
2631 poke_srmmu
= poke_tsunami
;
2634 __initfunc(static void poke_viking(void))
2636 unsigned long mreg
= srmmu_get_mmureg();
2637 static int smp_catch
= 0;
2639 if(viking_mxcc_present
) {
2640 unsigned long mxcc_control
= mxcc_get_creg();
2642 mxcc_control
|= (MXCC_CTL_ECE
| MXCC_CTL_PRE
| MXCC_CTL_MCE
);
2643 mxcc_control
&= ~(MXCC_CTL_RRC
);
2644 mxcc_set_creg(mxcc_control
);
2646 /* We don't need memory parity checks.
2647 * XXX This is a mess, have to dig out later. ecd.
2648 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
2651 /* We do cache ptables on MXCC. */
2652 mreg
|= VIKING_TCENABLE
;
2654 unsigned long bpreg
;
2656 mreg
&= ~(VIKING_TCENABLE
);
2658 /* Must disable mixed-cmd mode here for
2661 bpreg
= viking_get_bpreg();
2662 bpreg
&= ~(VIKING_ACTION_MIX
);
2663 viking_set_bpreg(bpreg
);
2665 /* Just in case PROM does something funny. */
2670 mreg
|= VIKING_SPENABLE
;
2671 mreg
|= (VIKING_ICENABLE
| VIKING_DCENABLE
);
2672 mreg
|= VIKING_SBENABLE
;
2673 mreg
&= ~(VIKING_ACENABLE
);
2674 srmmu_set_mmureg(mreg
);
2677 /* Avoid unnecessary cross calls. */
2678 BTFIXUPCOPY_CALL(flush_cache_all
, local_flush_cache_all
);
2679 BTFIXUPCOPY_CALL(flush_cache_mm
, local_flush_cache_mm
);
2680 BTFIXUPCOPY_CALL(flush_cache_range
, local_flush_cache_range
);
2681 BTFIXUPCOPY_CALL(flush_cache_page
, local_flush_cache_page
);
2682 BTFIXUPCOPY_CALL(flush_page_to_ram
, local_flush_page_to_ram
);
2683 BTFIXUPCOPY_CALL(flush_sig_insns
, local_flush_sig_insns
);
2684 BTFIXUPCOPY_CALL(flush_page_for_dma
, local_flush_page_for_dma
);
2689 __initfunc(static void init_viking(void))
2691 unsigned long mreg
= srmmu_get_mmureg();
2693 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
2694 if(mreg
& VIKING_MMODE
) {
2695 srmmu_name
= "TI Viking";
2696 viking_mxcc_present
= 0;
2699 BTFIXUPSET_CALL(set_pte
, srmmu_set_pte_nocache_viking
, BTFIXUPCALL_NORM
);
2700 BTFIXUPSET_CALL(pte_clear
, srmmu_pte_clear
, BTFIXUPCALL_NORM
);
2701 BTFIXUPSET_CALL(pmd_clear
, srmmu_pmd_clear
, BTFIXUPCALL_NORM
);
2702 BTFIXUPSET_CALL(pgd_clear
, srmmu_pgd_clear
, BTFIXUPCALL_NORM
);
2703 BTFIXUPSET_CALL(sparc_update_rootmmu_dir
, viking_update_rootmmu_dir
, BTFIXUPCALL_NORM
);
2705 BTFIXUPSET_CALL(flush_chunk
, viking_flush_chunk
, BTFIXUPCALL_NORM
); /* local flush _only_ */
2707 /* We need this to make sure old viking takes no hits
2708 * on it's cache for dma snoops to workaround the
2709 * "load from non-cacheable memory" interrupt bug.
2710 * This is only necessary because of the new way in
2711 * which we use the IOMMU.
2713 BTFIXUPSET_CALL(flush_page_for_dma
, viking_flush_page
, BTFIXUPCALL_NORM
);
2714 /* Also, this is so far the only chip which actually uses
2715 the page argument to flush_page_for_dma */
2716 flush_page_for_dma_global
= 0;
2718 srmmu_name
= "TI Viking/MXCC";
2719 viking_mxcc_present
= 1;
2721 BTFIXUPSET_CALL(flush_chunk
, viking_mxcc_flush_chunk
, BTFIXUPCALL_NOP
); /* local flush _only_ */
2723 /* MXCC vikings lack the DMA snooping bug. */
2724 BTFIXUPSET_CALL(flush_page_for_dma
, viking_flush_page_for_dma
, BTFIXUPCALL_NOP
);
2727 BTFIXUPSET_CALL(flush_cache_all
, viking_flush_cache_all
, BTFIXUPCALL_NORM
);
2728 BTFIXUPSET_CALL(flush_cache_mm
, viking_flush_cache_mm
, BTFIXUPCALL_NORM
);
2729 BTFIXUPSET_CALL(flush_cache_page
, viking_flush_cache_page
, BTFIXUPCALL_NORM
);
2730 BTFIXUPSET_CALL(flush_cache_range
, viking_flush_cache_range
, BTFIXUPCALL_NORM
);
2733 if (sparc_cpu_model
== sun4d
) {
2734 BTFIXUPSET_CALL(flush_tlb_all
, sun4dsmp_flush_tlb_all
, BTFIXUPCALL_NORM
);
2735 BTFIXUPSET_CALL(flush_tlb_mm
, sun4dsmp_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2736 BTFIXUPSET_CALL(flush_tlb_page
, sun4dsmp_flush_tlb_page
, BTFIXUPCALL_NORM
);
2737 BTFIXUPSET_CALL(flush_tlb_range
, sun4dsmp_flush_tlb_range
, BTFIXUPCALL_NORM
);
2741 BTFIXUPSET_CALL(flush_tlb_all
, viking_flush_tlb_all
, BTFIXUPCALL_NORM
);
2742 BTFIXUPSET_CALL(flush_tlb_mm
, viking_flush_tlb_mm
, BTFIXUPCALL_NORM
);
2743 BTFIXUPSET_CALL(flush_tlb_page
, viking_flush_tlb_page
, BTFIXUPCALL_NORM
);
2744 BTFIXUPSET_CALL(flush_tlb_range
, viking_flush_tlb_range
, BTFIXUPCALL_NORM
);
2747 BTFIXUPSET_CALL(flush_page_to_ram
, viking_flush_page_to_ram
, BTFIXUPCALL_NOP
);
2748 BTFIXUPSET_CALL(flush_sig_insns
, viking_flush_sig_insns
, BTFIXUPCALL_NOP
);
2750 poke_srmmu
= poke_viking
;
2753 /* Probe for the srmmu chip version. */
2754 __initfunc(static void get_srmmu_type(void))
2756 unsigned long mreg
, psr
;
2757 unsigned long mod_typ
, mod_rev
, psr_typ
, psr_vers
;
2759 srmmu_modtype
= SRMMU_INVAL_MOD
;
2762 mreg
= srmmu_get_mmureg(); psr
= get_psr();
2763 mod_typ
= (mreg
& 0xf0000000) >> 28;
2764 mod_rev
= (mreg
& 0x0f000000) >> 24;
2765 psr_typ
= (psr
>> 28) & 0xf;
2766 psr_vers
= (psr
>> 24) & 0xf;
2768 /* First, check for HyperSparc or Cypress. */
2772 /* UP or MP Hypersparc */
2777 /* Uniprocessor Cypress */
2783 /* _REALLY OLD_ Cypress MP chips... */
2787 /* MP Cypress mmu/cache-controller */
2788 init_cypress_605(mod_rev
);
2791 /* Some other Cypress revision, assume a 605. */
2792 init_cypress_605(mod_rev
);
2798 /* Now Fujitsu TurboSparc. It might happen that it is
2799 in Swift emulation mode, so we will check later... */
2800 if (psr_typ
== 0 && psr_vers
== 5) {
2805 /* Next check for Fujitsu Swift. */
2806 if(psr_typ
== 0 && psr_vers
== 4) {
2810 /* Look if it is not a TurboSparc emulating Swift... */
2811 cpunode
= prom_getchild(prom_root_node
);
2812 while((cpunode
= prom_getsibling(cpunode
)) != 0) {
2813 prom_getstring(cpunode
, "device_type", node_str
, sizeof(node_str
));
2814 if(!strcmp(node_str
, "cpu")) {
2815 if (!prom_getintdefault(cpunode
, "psr-implementation", 1) &&
2816 prom_getintdefault(cpunode
, "psr-version", 1) == 5) {
2828 /* Now the Viking family of srmmu. */
2831 ((psr_vers
== 1) && (mod_typ
== 0) && (mod_rev
== 0)))) {
2836 /* Finally the Tsunami. */
2837 if(psr_typ
== 4 && psr_vers
== 1 && (mod_typ
|| mod_rev
)) {
2846 static int srmmu_check_pgt_cache(int low
, int high
)
2848 struct page
*page
, *page2
;
2851 if (pgtable_cache_size
> high
) {
2852 spin_lock(&pte_spinlock
);
2853 for (page2
= NULL
, page
= (struct page
*)pte_quicklist
; page
;) {
2854 if ((unsigned int)page
->pprev_hash
== 0xffff) {
2856 page2
->next_hash
= page
->next_hash
;
2858 (struct page
*)pte_quicklist
= page
->next_hash
;
2859 page
->next_hash
= NULL
;
2860 page
->pprev_hash
= NULL
;
2861 pgtable_cache_size
-= 16;
2865 page
= page2
->next_hash
;
2867 page
= (struct page
*)pte_quicklist
;
2868 if (pgtable_cache_size
<= low
)
2873 page
= page
->next_hash
;
2875 spin_unlock(&pte_spinlock
);
2877 if (pgd_cache_size
> high
/ 4) {
2878 spin_lock(&pgd_spinlock
);
2879 for (page2
= NULL
, page
= (struct page
*)pgd_quicklist
; page
;) {
2880 if ((unsigned int)page
->pprev_hash
== 0xf) {
2882 page2
->next_hash
= page
->next_hash
;
2884 (struct page
*)pgd_quicklist
= page
->next_hash
;
2885 page
->next_hash
= NULL
;
2886 page
->pprev_hash
= NULL
;
2887 pgd_cache_size
-= 4;
2891 page
= page2
->next_hash
;
2893 page
= (struct page
*)pgd_quicklist
;
2894 if (pgd_cache_size
<= low
/ 4)
2899 page
= page
->next_hash
;
2901 spin_unlock(&pgd_spinlock
);
2906 extern unsigned long spwin_mmu_patchme
, fwin_mmu_patchme
,
2907 tsetup_mmu_patchme
, rtrap_mmu_patchme
;
2909 extern unsigned long spwin_srmmu_stackchk
, srmmu_fwin_stackchk
,
2910 tsetup_srmmu_stackchk
, srmmu_rett_stackchk
;
2912 extern unsigned long srmmu_fault
;
2914 #define PATCH_BRANCH(insn, dest) do { \
2917 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2920 __initfunc(static void patch_window_trap_handlers(void))
2922 unsigned long *iaddr
, *daddr
;
2924 PATCH_BRANCH(spwin_mmu_patchme
, spwin_srmmu_stackchk
);
2925 PATCH_BRANCH(fwin_mmu_patchme
, srmmu_fwin_stackchk
);
2926 PATCH_BRANCH(tsetup_mmu_patchme
, tsetup_srmmu_stackchk
);
2927 PATCH_BRANCH(rtrap_mmu_patchme
, srmmu_rett_stackchk
);
2928 PATCH_BRANCH(sparc_ttable
[SP_TRAP_TFLT
].inst_three
, srmmu_fault
);
2929 PATCH_BRANCH(sparc_ttable
[SP_TRAP_DFLT
].inst_three
, srmmu_fault
);
2930 PATCH_BRANCH(sparc_ttable
[SP_TRAP_DACC
].inst_three
, srmmu_fault
);
2934 /* Local cross-calls. */
2935 static void smp_flush_page_for_dma(unsigned long page
)
2937 xc1((smpfunc_t
) BTFIXUP_CALL(local_flush_page_for_dma
), page
);
2942 /* Load up routines and constants for sun4m and sun4d mmu */
2943 __initfunc(void ld_mmu_srmmu(void))
2945 extern void ld_mmu_iommu(void);
2946 extern void ld_mmu_iounit(void);
2947 extern void ___xchg32_sun4md(void);
2949 /* First the constants */
2950 BTFIXUPSET_SIMM13(pmd_shift
, SRMMU_PMD_SHIFT
);
2951 BTFIXUPSET_SETHI(pmd_size
, SRMMU_PMD_SIZE
);
2952 BTFIXUPSET_SETHI(pmd_mask
, SRMMU_PMD_MASK
);
2953 BTFIXUPSET_SIMM13(pgdir_shift
, SRMMU_PGDIR_SHIFT
);
2954 BTFIXUPSET_SETHI(pgdir_size
, SRMMU_PGDIR_SIZE
);
2955 BTFIXUPSET_SETHI(pgdir_mask
, SRMMU_PGDIR_MASK
);
2957 BTFIXUPSET_SIMM13(ptrs_per_pte
, SRMMU_PTRS_PER_PTE
);
2958 BTFIXUPSET_SIMM13(ptrs_per_pmd
, SRMMU_PTRS_PER_PMD
);
2959 BTFIXUPSET_SIMM13(ptrs_per_pgd
, SRMMU_PTRS_PER_PGD
);
2961 BTFIXUPSET_INT(page_none
, pgprot_val(SRMMU_PAGE_NONE
));
2962 BTFIXUPSET_INT(page_shared
, pgprot_val(SRMMU_PAGE_SHARED
));
2963 BTFIXUPSET_INT(page_copy
, pgprot_val(SRMMU_PAGE_COPY
));
2964 BTFIXUPSET_INT(page_readonly
, pgprot_val(SRMMU_PAGE_RDONLY
));
2965 BTFIXUPSET_INT(page_kernel
, pgprot_val(SRMMU_PAGE_KERNEL
));
2966 pg_iobits
= SRMMU_VALID
| SRMMU_WRITE
| SRMMU_REF
;
2970 BTFIXUPSET_CALL(___xchg32
, ___xchg32_sun4md
, BTFIXUPCALL_SWAPG1G2
);
2972 BTFIXUPSET_CALL(get_pte_fast
, srmmu_get_pte_fast
, BTFIXUPCALL_RETINT(0));
2973 BTFIXUPSET_CALL(get_pgd_fast
, srmmu_get_pgd_fast
, BTFIXUPCALL_RETINT(0));
2974 BTFIXUPSET_CALL(free_pte_slow
, srmmu_free_pte_slow
, BTFIXUPCALL_NOP
);
2975 BTFIXUPSET_CALL(free_pgd_slow
, srmmu_free_pgd_slow
, BTFIXUPCALL_NOP
);
2976 BTFIXUPSET_CALL(do_check_pgt_cache
, srmmu_check_pgt_cache
, BTFIXUPCALL_NORM
);
2978 BTFIXUPSET_CALL(set_pgdir
, srmmu_set_pgdir
, BTFIXUPCALL_NORM
);
2980 BTFIXUPSET_CALL(set_pte
, srmmu_set_pte_cacheable
, BTFIXUPCALL_SWAPO0O1
);
2981 BTFIXUPSET_CALL(init_new_context
, srmmu_init_new_context
, BTFIXUPCALL_NORM
);
2982 BTFIXUPSET_CALL(switch_to_context
, srmmu_switch_to_context
, BTFIXUPCALL_NORM
);
2984 BTFIXUPSET_CALL(pte_page
, srmmu_pte_page
, BTFIXUPCALL_NORM
);
2985 BTFIXUPSET_CALL(pmd_page
, srmmu_pmd_page
, BTFIXUPCALL_NORM
);
2986 BTFIXUPSET_CALL(pgd_page
, srmmu_pgd_page
, BTFIXUPCALL_NORM
);
2988 BTFIXUPSET_CALL(sparc_update_rootmmu_dir
, srmmu_update_rootmmu_dir
, BTFIXUPCALL_NORM
);
2990 BTFIXUPSET_SETHI(none_mask
, 0xF0000000);
2992 BTFIXUPSET_CALL(pte_present
, srmmu_pte_present
, BTFIXUPCALL_NORM
);
2993 BTFIXUPSET_CALL(pte_clear
, srmmu_pte_clear
, BTFIXUPCALL_SWAPO0G0
);
2995 BTFIXUPSET_CALL(pmd_bad
, srmmu_pmd_bad
, BTFIXUPCALL_NORM
);
2996 BTFIXUPSET_CALL(pmd_present
, srmmu_pmd_present
, BTFIXUPCALL_NORM
);
2997 BTFIXUPSET_CALL(pmd_clear
, srmmu_pmd_clear
, BTFIXUPCALL_SWAPO0G0
);
2999 BTFIXUPSET_CALL(pgd_none
, srmmu_pgd_none
, BTFIXUPCALL_NORM
);
3000 BTFIXUPSET_CALL(pgd_bad
, srmmu_pgd_bad
, BTFIXUPCALL_NORM
);
3001 BTFIXUPSET_CALL(pgd_present
, srmmu_pgd_present
, BTFIXUPCALL_NORM
);
3002 BTFIXUPSET_CALL(pgd_clear
, srmmu_pgd_clear
, BTFIXUPCALL_SWAPO0G0
);
3004 BTFIXUPSET_CALL(mk_pte
, srmmu_mk_pte
, BTFIXUPCALL_NORM
);
3005 BTFIXUPSET_CALL(mk_pte_phys
, srmmu_mk_pte_phys
, BTFIXUPCALL_NORM
);
3006 BTFIXUPSET_CALL(mk_pte_io
, srmmu_mk_pte_io
, BTFIXUPCALL_NORM
);
3007 BTFIXUPSET_CALL(pgd_set
, srmmu_pgd_set
, BTFIXUPCALL_NORM
);
3009 BTFIXUPSET_INT(pte_modify_mask
, SRMMU_CHG_MASK
);
3010 BTFIXUPSET_CALL(pgd_offset
, srmmu_pgd_offset
, BTFIXUPCALL_NORM
);
3011 BTFIXUPSET_CALL(pmd_offset
, srmmu_pmd_offset
, BTFIXUPCALL_NORM
);
3012 BTFIXUPSET_CALL(pte_offset
, srmmu_pte_offset
, BTFIXUPCALL_NORM
);
3013 BTFIXUPSET_CALL(pte_free_kernel
, srmmu_pte_free
, BTFIXUPCALL_NORM
);
3014 BTFIXUPSET_CALL(pmd_free_kernel
, srmmu_pmd_free
, BTFIXUPCALL_NORM
);
3015 BTFIXUPSET_CALL(pte_alloc_kernel
, srmmu_pte_alloc
, BTFIXUPCALL_NORM
);
3016 BTFIXUPSET_CALL(pmd_alloc_kernel
, srmmu_pmd_alloc
, BTFIXUPCALL_NORM
);
3017 BTFIXUPSET_CALL(pte_free
, srmmu_pte_free
, BTFIXUPCALL_NORM
);
3018 BTFIXUPSET_CALL(pte_alloc
, srmmu_pte_alloc
, BTFIXUPCALL_NORM
);
3019 BTFIXUPSET_CALL(pmd_free
, srmmu_pmd_free
, BTFIXUPCALL_NORM
);
3020 BTFIXUPSET_CALL(pmd_alloc
, srmmu_pmd_alloc
, BTFIXUPCALL_NORM
);
3021 BTFIXUPSET_CALL(pgd_free
, srmmu_pgd_free
, BTFIXUPCALL_NORM
);
3022 BTFIXUPSET_CALL(pgd_alloc
, srmmu_pgd_alloc
, BTFIXUPCALL_NORM
);
3024 BTFIXUPSET_HALF(pte_writei
, SRMMU_WRITE
);
3025 BTFIXUPSET_HALF(pte_dirtyi
, SRMMU_DIRTY
);
3026 BTFIXUPSET_HALF(pte_youngi
, SRMMU_REF
);
3027 BTFIXUPSET_HALF(pte_wrprotecti
, SRMMU_WRITE
);
3028 BTFIXUPSET_HALF(pte_mkcleani
, SRMMU_DIRTY
);
3029 BTFIXUPSET_HALF(pte_mkoldi
, SRMMU_REF
);
3030 BTFIXUPSET_CALL(pte_mkwrite
, srmmu_pte_mkwrite
, BTFIXUPCALL_ORINT(SRMMU_WRITE
));
3031 BTFIXUPSET_CALL(pte_mkdirty
, srmmu_pte_mkdirty
, BTFIXUPCALL_ORINT(SRMMU_DIRTY
));
3032 BTFIXUPSET_CALL(pte_mkyoung
, srmmu_pte_mkyoung
, BTFIXUPCALL_ORINT(SRMMU_REF
));
3033 BTFIXUPSET_CALL(update_mmu_cache
, srmmu_update_mmu_cache
, BTFIXUPCALL_NOP
);
3034 BTFIXUPSET_CALL(destroy_context
, srmmu_destroy_context
, BTFIXUPCALL_NORM
);
3036 BTFIXUPSET_CALL(mmu_info
, srmmu_mmu_info
, BTFIXUPCALL_NORM
);
3037 BTFIXUPSET_CALL(mmu_v2p
, srmmu_v2p
, BTFIXUPCALL_NORM
);
3038 BTFIXUPSET_CALL(mmu_p2v
, srmmu_p2v
, BTFIXUPCALL_NORM
);
3040 /* Task struct and kernel stack allocating/freeing. */
3041 BTFIXUPSET_CALL(alloc_task_struct
, srmmu_alloc_task_struct
, BTFIXUPCALL_NORM
);
3042 BTFIXUPSET_CALL(free_task_struct
, srmmu_free_task_struct
, BTFIXUPCALL_NORM
);
3044 BTFIXUPSET_CALL(quick_kernel_fault
, srmmu_quick_kernel_fault
, BTFIXUPCALL_NORM
);
3046 /* SRMMU specific. */
3047 BTFIXUPSET_CALL(ctxd_set
, srmmu_ctxd_set
, BTFIXUPCALL_NORM
);
3048 BTFIXUPSET_CALL(pmd_set
, srmmu_pmd_set
, BTFIXUPCALL_NORM
);
3051 patch_window_trap_handlers();
3054 /* El switcheroo... */
3056 BTFIXUPCOPY_CALL(local_flush_cache_all
, flush_cache_all
);
3057 BTFIXUPCOPY_CALL(local_flush_cache_mm
, flush_cache_mm
);
3058 BTFIXUPCOPY_CALL(local_flush_cache_range
, flush_cache_range
);
3059 BTFIXUPCOPY_CALL(local_flush_cache_page
, flush_cache_page
);
3060 BTFIXUPCOPY_CALL(local_flush_tlb_all
, flush_tlb_all
);
3061 BTFIXUPCOPY_CALL(local_flush_tlb_mm
, flush_tlb_mm
);
3062 BTFIXUPCOPY_CALL(local_flush_tlb_range
, flush_tlb_range
);
3063 BTFIXUPCOPY_CALL(local_flush_tlb_page
, flush_tlb_page
);
3064 BTFIXUPCOPY_CALL(local_flush_page_to_ram
, flush_page_to_ram
);
3065 BTFIXUPCOPY_CALL(local_flush_sig_insns
, flush_sig_insns
);
3066 BTFIXUPCOPY_CALL(local_flush_page_for_dma
, flush_page_for_dma
);
3068 BTFIXUPSET_CALL(flush_cache_all
, smp_flush_cache_all
, BTFIXUPCALL_NORM
);
3069 BTFIXUPSET_CALL(flush_cache_mm
, smp_flush_cache_mm
, BTFIXUPCALL_NORM
);
3070 BTFIXUPSET_CALL(flush_cache_range
, smp_flush_cache_range
, BTFIXUPCALL_NORM
);
3071 BTFIXUPSET_CALL(flush_cache_page
, smp_flush_cache_page
, BTFIXUPCALL_NORM
);
3072 if (sparc_cpu_model
!= sun4d
) {
3073 BTFIXUPSET_CALL(flush_tlb_all
, smp_flush_tlb_all
, BTFIXUPCALL_NORM
);
3074 BTFIXUPSET_CALL(flush_tlb_mm
, smp_flush_tlb_mm
, BTFIXUPCALL_NORM
);
3075 BTFIXUPSET_CALL(flush_tlb_range
, smp_flush_tlb_range
, BTFIXUPCALL_NORM
);
3076 BTFIXUPSET_CALL(flush_tlb_page
, smp_flush_tlb_page
, BTFIXUPCALL_NORM
);
3078 BTFIXUPSET_CALL(flush_page_to_ram
, smp_flush_page_to_ram
, BTFIXUPCALL_NORM
);
3079 BTFIXUPSET_CALL(flush_sig_insns
, smp_flush_sig_insns
, BTFIXUPCALL_NORM
);
3080 BTFIXUPSET_CALL(flush_page_for_dma
, smp_flush_page_for_dma
, BTFIXUPCALL_NORM
);
3082 if (sparc_cpu_model
== sun4d
)
3087 if (sparc_cpu_model
== sun4d
)