/*
 * Copyright 1996 The Australian National University.
 * Copyright 1996 Fujitsu Laboratories Limited
 *
 * This software may be distributed under the terms of the GNU
 * General Public License version 2 or later
 */
/*
 * apmmu.c: mmu routines for the AP1000
 *
 * based on srmmu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/a.out.h>
#include <asm/ap1000/pgtapmmu.h>
#include <asm/viking.h>

static void mc_tlb_flush_all(void);

static void poke_viking(void);
static void viking_flush_tlb_page_for_cbit(unsigned long page);

static struct apmmu_stats {
	int invall;
	int invpg;
	int invrnge;
	int invmm;
} module_stats;
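/* Per-flush-type statistics, reported by apmmu_mmu_info() below. */
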
static char *apmmu_name;

static ctxd_t *apmmu_ctx_table_phys;
static ctxd_t *apmmu_context_table;

static unsigned long ap_mem_size;
static unsigned long mempool;

static inline unsigned long apmmu_v2p(unsigned long vaddr)
{
	if (KERNBASE <= vaddr &&
	    (KERNBASE + ap_mem_size > vaddr)) {
		return (vaddr - KERNBASE);
	}
	return 0xffffffffUL;
}

static inline unsigned long apmmu_p2v(unsigned long paddr)
{
	if (ap_mem_size > paddr)
		return (paddr + KERNBASE);
	return 0xffffffffUL;
}
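
/*
 * Example: with the kernel mapped linearly at KERNBASE, virtual address
 * KERNBASE + 0x1000 translates to physical 0x1000 and back again;
 * anything outside the [KERNBASE, KERNBASE + ap_mem_size) window yields
 * the 0xffffffffUL failure value.
 */
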
/* In general all page table modifications should use the V8 atomic
 * swap instruction. This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */

static inline unsigned long apmmu_swap(unsigned long *addr, unsigned long value)
{
	/* the AP1000 has its memory on bus 8, not 0 like suns do */
	if ((value & 0xF0000000) == 0)
		value |= MEM_BUS_SPACE << 28;
	__asm__ __volatile__("swap [%2], %0\n\t" :
			     "=&r" (value) :
			     "0" (value), "r" (addr));
	return value;
}

/* Callers use this macro rather than apmmu_swap() directly. */
#define apmmu_set_entry(ptr, newentry) \
	apmmu_swap((unsigned long *) (ptr), (newentry))
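
/*
 * For example, apmmu_set_entry(ptep, pte_val(pteval)) both installs the
 * new entry and returns the old one in a single bus transaction, so a
 * ref/mod bit update by the MMU cannot be lost between a separate read
 * and write.
 */
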
/* The very generic APMMU page table operations. */
static unsigned int apmmu_pmd_align(unsigned int addr) { return APMMU_PMD_ALIGN(addr); }
static unsigned int apmmu_pgdir_align(unsigned int addr) { return APMMU_PGDIR_ALIGN(addr); }

static inline int apmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
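
/*
 * In an SRMMU-style table entry the physical address lives in the high
 * bits: a physical address is stored as (paddr >> 4) with the low byte
 * reused for type/access bits, which is why the accessors below mask
 * with *_PMASK and shift left by 4 to recover it.
 */
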
static unsigned long apmmu_pgd_page(pgd_t pgd)
{ return apmmu_device_memory(pgd_val(pgd)) ? ~0 : apmmu_p2v((pgd_val(pgd) & APMMU_PTD_PMASK) << 4); }

static unsigned long apmmu_pmd_page(pmd_t pmd)
{ return apmmu_device_memory(pmd_val(pmd)) ? ~0 : apmmu_p2v((pmd_val(pmd) & APMMU_PTD_PMASK) << 4); }

static unsigned long apmmu_pte_page(pte_t pte)
{ return apmmu_device_memory(pte_val(pte)) ? ~0 : apmmu_p2v((pte_val(pte) & APMMU_PTE_PMASK) << 4); }

static int apmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static int apmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & APMMU_ET_MASK) == APMMU_ET_PTE); }

static void apmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }

static int apmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static int apmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & APMMU_ET_MASK) != APMMU_ET_PTD; }

static int apmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & APMMU_ET_MASK) == APMMU_ET_PTD); }

static void apmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }

static int apmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static int apmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & APMMU_ET_MASK) != APMMU_ET_PTD; }

static int apmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & APMMU_ET_MASK) == APMMU_ET_PTD); }

static void apmmu_pgd_clear(pgd_t *pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }

static pte_t apmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | APMMU_WRITE); }
static pte_t apmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | APMMU_DIRTY); }
static pte_t apmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | APMMU_REF); }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t apmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{ return __pte(((apmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); }

static pte_t apmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t apmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

static void apmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	set_pte((pte_t *)ctxp, (APMMU_ET_PTD | (apmmu_v2p((unsigned long) pgdp) >> 4)));
}

static void apmmu_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	set_pte((pte_t *)pgdp, (APMMU_ET_PTD | (apmmu_v2p((unsigned long) pmdp) >> 4)));
}

static void apmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	set_pte((pte_t *)pmdp, (APMMU_ET_PTD | (apmmu_v2p((unsigned long) ptep) >> 4)));
}

static pte_t apmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & APMMU_CHG_MASK) | pgprot_val(newprot));
}

/* to find an entry in a top-level page table... */
static pgd_t *apmmu_pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + ((address >> APMMU_PGDIR_SHIFT) & (APMMU_PTRS_PER_PGD - 1));
}

/* Find an entry in the second-level page table.. */
static pmd_t *apmmu_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) apmmu_pgd_page(*dir) + ((address >> APMMU_PMD_SHIFT) & (APMMU_PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static pte_t *apmmu_pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) apmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (APMMU_PTRS_PER_PTE - 1));
}

/* This must update the context table entry for this process. */
static void apmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if (tsk->mm->context != NO_CONTEXT) {
		flush_cache_mm(current->mm);
		apmmu_ctxd_set(&apmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(current->mm);
	}
}

/* Accessing the MMU control register. */
static inline unsigned int apmmu_get_mmureg(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

static inline void apmmu_set_mmureg(unsigned long regval)
{
	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

static inline void apmmu_set_ctable_ptr(unsigned long paddr)
{
	paddr = ((paddr >> 4) & APMMU_CTX_PMASK);
	paddr |= (MEM_BUS_SPACE << 28);
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (paddr), "r" (APMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS) :
			     "memory");
}

static inline void apmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),	/* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}
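
/*
 * Writes through ASI_M_FLUSH_PROBE encode the flush granularity in the
 * low address bits: 0x400 flushes the whole TLB, 0x300 one context,
 * 0x200 a region, 0x100 a segment and 0x000 a single page, as the
 * helpers below use.
 */
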
/* These flush types are not available on all chips... */
static inline void apmmu_flush_tlb_ctx(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x300),	/* Flush TLB ctx.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static inline void apmmu_flush_tlb_region(unsigned long addr)
{
	addr &= APMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x200),	/* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static inline void apmmu_flush_tlb_segment(unsigned long addr)
{
	addr &= APMMU_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x100),	/* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static inline void apmmu_flush_tlb_page(unsigned long page)
{
	page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (page),	/* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static inline unsigned long apmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

	return retval;
}
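
/* A probe (address | 0x400) returns the matching PTE, or zero on a
 * miss; apmmu_quick_kernel_fault() below uses this for diagnostics. */
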
static inline void apmmu_uncache_page(unsigned long addr)
{
	pgd_t *pgdp = apmmu_pgd_offset(&init_mm, addr);
	pmd_t *pmdp;
	pte_t *ptep;

	if ((pgd_val(*pgdp) & APMMU_ET_MASK) == APMMU_ET_PTE) {
		ptep = (pte_t *) pgdp;
	} else {
		pmdp = apmmu_pmd_offset(pgdp, addr);
		if ((pmd_val(*pmdp) & APMMU_ET_MASK) == APMMU_ET_PTE) {
			ptep = (pte_t *) pmdp;
		} else {
			ptep = apmmu_pte_offset(pmdp, addr);
		}
	}

	set_pte(ptep, __pte((pte_val(*ptep) & ~APMMU_CACHE)));
	viking_flush_tlb_page_for_cbit(addr);
}

static inline void apmmu_recache_page(unsigned long addr)
{
	pgd_t *pgdp = apmmu_pgd_offset(&init_mm, addr);
	pmd_t *pmdp;
	pte_t *ptep;

	if ((pgd_val(*pgdp) & APMMU_ET_MASK) == APMMU_ET_PTE) {
		ptep = (pte_t *) pgdp;
	} else {
		pmdp = apmmu_pmd_offset(pgdp, addr);
		if ((pmd_val(*pmdp) & APMMU_ET_MASK) == APMMU_ET_PTE) {
			ptep = (pte_t *) pmdp;
		} else {
			ptep = apmmu_pte_offset(pmdp, addr);
		}
	}

	set_pte(ptep, __pte((pte_val(*ptep) | APMMU_CACHE)));
	viking_flush_tlb_page_for_cbit(addr);
}

static inline unsigned long apmmu_getpage(void)
{
	unsigned long page = get_free_page(GFP_KERNEL);

	return page;
}

static inline void apmmu_putpage(unsigned long page)
{
	free_page(page);
}

/* The easy versions. */
#define NEW_PGD() (pgd_t *) apmmu_getpage()
#define NEW_PMD() (pmd_t *) apmmu_getpage()
#define NEW_PTE() (pte_t *) apmmu_getpage()
#define FREE_PGD(chunk) apmmu_putpage((unsigned long)(chunk))
#define FREE_PMD(chunk) apmmu_putpage((unsigned long)(chunk))
#define FREE_PTE(chunk) apmmu_putpage((unsigned long)(chunk))

static pte_t *apmmu_get_pte_fast(void)
{
	return (pte_t *)0;
}

static pmd_t *apmmu_get_pmd_fast(void)
{
	return (pmd_t *)0;
}

static pgd_t *apmmu_get_pgd_fast(void)
{
	return (pgd_t *)0;
}

static void apmmu_free_pte_slow(pte_t *pte)
{
	/* TBD */
}

static void apmmu_free_pmd_slow(pmd_t *pmd)
{
	/* TBD */
}

static void apmmu_free_pgd_slow(pgd_t *pgd)
{
	/* TBD */
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
static void apmmu_pte_free_kernel(pte_t *pte)
{
	FREE_PTE(pte);
}

static pte_t *apmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (APMMU_PTRS_PER_PTE - 1);
	if (apmmu_pmd_none(*pmd)) {
		pte_t *page = NEW_PTE();
		if (apmmu_pmd_none(*pmd)) {
			if (page) {
				apmmu_pmd_set(pmd, page);
				return page + address;
			}
			apmmu_pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		FREE_PTE(page);
	}
	if (apmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		apmmu_pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) apmmu_pmd_page(*pmd) + address;
}

static void apmmu_pmd_free_kernel(pmd_t *pmd)
{
	FREE_PMD(pmd);
}

static pmd_t *apmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> APMMU_PMD_SHIFT) & (APMMU_PTRS_PER_PMD - 1);
	if (apmmu_pgd_none(*pgd)) {
		pmd_t *page;
		page = NEW_PMD();
		if (apmmu_pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
			return NULL;
		}
		FREE_PMD(page);
	}
	if (apmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

static void apmmu_pte_free(pte_t *pte)
{
	FREE_PTE(pte);
}

static pte_t *apmmu_pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (APMMU_PTRS_PER_PTE - 1);
	if (apmmu_pmd_none(*pmd)) {
		pte_t *page = NEW_PTE();
		if (apmmu_pmd_none(*pmd)) {
			if (page) {
				apmmu_pmd_set(pmd, page);
				return page + address;
			}
			apmmu_pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		FREE_PTE(page);
	}
	if (apmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		apmmu_pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return ((pte_t *) apmmu_pmd_page(*pmd)) + address;
}

/* Real three-level page tables on APMMU. */
static void apmmu_pmd_free(pmd_t *pmd)
{
	FREE_PMD(pmd);
}

static pmd_t *apmmu_pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> APMMU_PMD_SHIFT) & (APMMU_PTRS_PER_PMD - 1);
	if (apmmu_pgd_none(*pgd)) {
		pmd_t *page = NEW_PMD();
		if (apmmu_pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
			return NULL;
		}
		FREE_PMD(page);
	}
	if (apmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) apmmu_pgd_page(*pgd) + address;
}

static void apmmu_pgd_free(pgd_t *pgd)
{
	FREE_PGD(pgd);
}

static pgd_t *apmmu_pgd_alloc(void)
{
	return NEW_PGD();
}

static void apmmu_pgd_flush(pgd_t *pgdp)
{
}

static void apmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
{
	apmmu_set_entry(ptep, pte_val(pteval));
}

static void apmmu_quick_kernel_fault(unsigned long address)
{
	printk("Kernel faults at addr=0x%08lx\n", address);
	printk("PTE=%08lx\n", apmmu_hwprobe((address & PAGE_MASK)));
	die_if_kernel("APMMU bolixed...", current->tss.kregs);
}
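
/*
 * Context allocation: parallel (MPP) tasks get a fixed context number
 * derived from their task id; ordinary tasks take a context from the
 * free list, or steal the oldest used one (flushing its owner and
 * marking it NO_CONTEXT) when the free list is empty.
 */
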
static inline void alloc_context(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct ctx_list *ctxp;

	if (tsk->taskid >= MPP_TASK_BASE) {
		mm->context = MPP_CONTEXT_BASE + (tsk->taskid - MPP_TASK_BASE);
		return;
	}

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == current->mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	if (context >= MPP_CONTEXT_BASE)
		return;	/* nothing to do! */

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

static void apmmu_switch_to_context(struct task_struct *tsk)
{
	if (tsk->mm->context == NO_CONTEXT) {
		alloc_context(tsk);
		flush_cache_mm(current->mm);
		apmmu_ctxd_set(&apmmu_context_table[tsk->mm->context], tsk->mm->pgd);
		flush_tlb_mm(current->mm);
	}
	apmmu_set_context(tsk->mm->context);
}

static char *apmmu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void apmmu_unlockarea(char *vaddr, unsigned long len)
{
}

struct task_struct *apmmu_alloc_task_struct(void)
{
	return (struct task_struct *) kmalloc(sizeof(struct task_struct), GFP_KERNEL);
}

static void apmmu_free_task_struct(struct task_struct *tsk)
{
	kfree(tsk);
}

static void apmmu_null_func(void)
{
}

static inline void mc_tlb_flush_all(void)
{
	unsigned long long *tlb4k;
	int i;

	tlb4k = (unsigned long long *)MC_MMU_TLB4K;
	for (i = MC_MMU_TLB4K_SIZE/4; i > 0; --i) {
		tlb4k[0] = 0;
		tlb4k[1] = 0;
		tlb4k[2] = 0;
		tlb4k[3] = 0;
		tlb4k += 4;
	}
}

static inline void mc_tlb_flush_page(unsigned vaddr, int ctx)
{
	if (ctx == SYSTEM_CONTEXT || MPP_IS_PAR_CTX(ctx)) {
		*(((unsigned long long *)MC_MMU_TLB4K) + ((vaddr>>12)&0xFF)) = 0;
	}
}

static inline void mc_tlb_flush_ctx(int ctx)
{
	unsigned long long *tlb4k = (unsigned long long *)MC_MMU_TLB4K;

	if (ctx == SYSTEM_CONTEXT || MPP_IS_PAR_CTX(ctx)) {
		int i;
		for (i = 0; i < MC_MMU_TLB4K_SIZE; i++)
			if (((tlb4k[i] >> 5) & 0xFFF) == ctx)
				tlb4k[i] = 0;
	}
}

static inline void mc_tlb_flush_region(unsigned start, int ctx)
{
	mc_tlb_flush_ctx(ctx);
}

static inline void mc_tlb_flush_segment(unsigned start, int ctx)
{
	mc_tlb_flush_ctx(ctx);
}
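
/*
 * The MC keeps its own copy of translations as an array of 64-bit
 * entries at MC_MMU_TLB4K; writing zero invalidates an entry, and the
 * context number sits in bits 16..5.  There is no finer MC flush than
 * a whole context, so region and segment flushes simply fall back to
 * mc_tlb_flush_ctx().
 */
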
static void viking_flush_tlb_all(void)
{
	module_stats.invall++;
	flush_user_windows();
	apmmu_flush_whole_tlb();
	mc_tlb_flush_all();
}
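
/*
 * The user-space flushes below must run with the target context
 * current, because the Viking TLB flush operates on the active
 * context; they save and restore the context register around the
 * flush, and always shoot down the MC copy as well.
 */
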
static void viking_flush_tlb_mm(struct mm_struct *mm)
{
	int octx;

	module_stats.invmm++;

	if (mm->context != NO_CONTEXT) {
		flush_user_windows();
		octx = apmmu_get_context();
		if (octx != mm->context)
			apmmu_set_context(mm->context);
		apmmu_flush_tlb_ctx();
		mc_tlb_flush_ctx(mm->context);
		if (octx != mm->context)
			apmmu_set_context(octx);
	}
}

static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int octx;

	module_stats.invrnge++;

	if (mm->context != NO_CONTEXT) {
		flush_user_windows();
		octx = apmmu_get_context();
		if (octx != mm->context)
			apmmu_set_context(mm->context);
		if ((end - start) < APMMU_PMD_SIZE) {
			start &= PAGE_MASK;
			while (start < end) {
				apmmu_flush_tlb_page(start);
				mc_tlb_flush_page(start, mm->context);
				start += PAGE_SIZE;
			}
		} else if ((end - start) < APMMU_PGDIR_SIZE) {
			start &= APMMU_PMD_MASK;
			while (start < end) {
				apmmu_flush_tlb_segment(start);
				mc_tlb_flush_segment(start, mm->context);
				start += APMMU_PMD_SIZE;
			}
		} else {
			start &= APMMU_PGDIR_MASK;
			while (start < end) {
				apmmu_flush_tlb_region(start);
				mc_tlb_flush_region(start, mm->context);
				start += APMMU_PGDIR_SIZE;
			}
		}
		if (octx != mm->context)
			apmmu_set_context(octx);
	}
}

static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int octx;
	struct mm_struct *mm = vma->vm_mm;

	module_stats.invpg++;
	if (mm->context != NO_CONTEXT) {
		flush_user_windows();
		octx = apmmu_get_context();
		if (octx != mm->context)
			apmmu_set_context(mm->context);
		apmmu_flush_tlb_page(page);
		mc_tlb_flush_page(page, mm->context);
		if (octx != mm->context)
			apmmu_set_context(octx);
	}
}

static void viking_flush_tlb_page_for_cbit(unsigned long page)
{
	apmmu_flush_tlb_page(page);
	mc_tlb_flush_page(page, apmmu_get_context());
}

/* Some dirty hacks to abstract away the painful boot up init. */
static inline unsigned long apmmu_early_paddr(unsigned long vaddr)
{
	return (vaddr - KERNBASE);
}
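
/*
 * The "early" variants below run before ap_mem_size is set up in
 * map_kernel(), so they translate with a bare (vaddr - KERNBASE)
 * instead of apmmu_v2p().
 */
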
static inline void apmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	set_pte((pte_t *)pgdp, __pte((APMMU_ET_PTD | (apmmu_early_paddr((unsigned long) pmdp) >> 4))));
}

static inline void apmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	set_pte((pte_t *)pmdp, __pte((APMMU_ET_PTD | (apmmu_early_paddr((unsigned long) ptep) >> 4))));
}

static inline unsigned long apmmu_early_pgd_page(pgd_t pgd)
{
	return ((pgd_val(pgd) & APMMU_PTD_PMASK) << 4) + KERNBASE;
}

static inline unsigned long apmmu_early_pmd_page(pmd_t pmd)
{
	return ((pmd_val(pmd) & APMMU_PTD_PMASK) << 4) + KERNBASE;
}

static inline pmd_t *apmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) apmmu_early_pgd_page(*dir) + ((address >> APMMU_PMD_SHIFT) & (APMMU_PTRS_PER_PMD - 1));
}

static inline pte_t *apmmu_early_pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) apmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (APMMU_PTRS_PER_PTE - 1));
}

__initfunc(static inline void apmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end))
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = apmmu_pgd_offset(&init_mm, start);
		if (apmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, APMMU_PMD_TABLE_SIZE);
			apmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = apmmu_early_pmd_offset(pgdp, start);
		if (apmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, APMMU_PTE_TABLE_SIZE);
			apmmu_early_pmd_set(pmdp, ptep);
		}
		start = (start + APMMU_PMD_SIZE) & APMMU_PMD_MASK;
	}
}

__initfunc(static void make_page(unsigned virt_page, unsigned phys_page, unsigned prot))
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned start = virt_page << 12;

	pgdp = apmmu_pgd_offset(&init_mm, start);
	if (apmmu_pgd_none(*pgdp)) {
		pmdp = sparc_init_alloc(&mempool, APMMU_PMD_TABLE_SIZE);
		apmmu_early_pgd_set(pgdp, pmdp);
	}
	pmdp = apmmu_early_pmd_offset(pgdp, start);
	if (apmmu_pmd_none(*pmdp)) {
		ptep = sparc_init_alloc(&mempool, APMMU_PTE_TABLE_SIZE);
		apmmu_early_pmd_set(pmdp, ptep);
	}
	ptep = apmmu_early_pte_offset(pmdp, start);
	*ptep = __pte((phys_page << 8) | prot);
}

__initfunc(static void make_large_page(unsigned virt_page, unsigned phys_page, unsigned prot))
{
	pgd_t *pgdp;
	unsigned start = virt_page << 12;

	pgdp = apmmu_pgd_offset(&init_mm, start);
	*pgdp = __pgd((phys_page << 8) | prot);
}
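
/*
 * Note the "phys_page << 8" above: with phys_page = paddr >> 12 this is
 * the same "paddr >> 4" entry encoding that apmmu_mk_pte() uses.
 * make_page() installs one 4K mapping, while make_large_page() writes
 * the entry straight into the pgd to get one APMMU_PGDIR_SIZE mapping.
 */
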
__initfunc(static void ap_setup_mappings(void))
{
	unsigned Srwe = APMMU_PRIV | APMMU_VALID;
	unsigned SrweUr = 0x14 | APMMU_VALID;	/* weird! */

	/* LBus */
	make_large_page(0xfb000, 0x9fb000, Srwe);
	make_large_page(0xff000, 0x9ff000, SrweUr);
	make_large_page(0xfc000, 0x911000, Srwe);

	/* MC Register */
	make_page(0xfa000, 0xb00000, SrweUr);
	make_page(0xfa001, 0xb00001, Srwe);
	make_page(0xfa002, 0xb00002, Srwe);
	make_page(0xfa003, 0xb00003, Srwe);
	make_page(0xfa004, 0xb00004, Srwe);
	make_page(0xfa005, 0xb00005, Srwe);
	make_page(0xfa006, 0xb00006, Srwe);
	make_page(0xfa007, 0xb00007, Srwe);

	/* MSC+ Register */
	make_page(0xfa008, 0xc00000, SrweUr);
	make_page(0xfa009, 0xc00001, Srwe);
	make_page(0xfa00a, 0xc00002, Srwe);
	make_page(0xfa00b, 0xc00003, Srwe);
	make_page(0xfa00c, 0xc00004, Srwe);
	make_page(0xfa00d, 0xc00005, Srwe);	/* RBMPR 0 */
	make_page(0xfa00e, 0xc00006, Srwe);	/* RBMPR 1 */
	make_page(0xfa00f, 0xc00007, Srwe);	/* RBMPR 2 */

	/* user queues */
	make_page(MSC_PUT_QUEUE>>PAGE_SHIFT,  0xa00000, Srwe);
	make_page(MSC_GET_QUEUE>>PAGE_SHIFT,  0xa00001, Srwe);
	make_page(MSC_SEND_QUEUE>>PAGE_SHIFT, 0xa00040, Srwe);
	make_page(MSC_XY_QUEUE>>PAGE_SHIFT,   0xa00640, Srwe);
	make_page(MSC_X_QUEUE>>PAGE_SHIFT,    0xa00240, Srwe);
	make_page(MSC_Y_QUEUE>>PAGE_SHIFT,    0xa00440, Srwe);
	make_page(MSC_XYG_QUEUE>>PAGE_SHIFT,  0xa00600, Srwe);
	make_page(MSC_XG_QUEUE>>PAGE_SHIFT,   0xa00200, Srwe);
	make_page(MSC_YG_QUEUE>>PAGE_SHIFT,   0xa00400, Srwe);
	make_page(MSC_CSI_QUEUE>>PAGE_SHIFT,  0xa02004, Srwe);
	make_page(MSC_FOP_QUEUE>>PAGE_SHIFT,  0xa02005, Srwe);

	/* system queues */
	make_page(MSC_PUT_QUEUE_S>>PAGE_SHIFT,   0xa02000, Srwe);	/* system put */
	make_page(MSC_CPUT_QUEUE_S>>PAGE_SHIFT,  0xa02020, Srwe);	/* system creg put */
	make_page(MSC_GET_QUEUE_S>>PAGE_SHIFT,   0xa02001, Srwe);	/* system get */
	make_page(MSC_CGET_QUEUE_S>>PAGE_SHIFT,  0xa02021, Srwe);	/* system creg get */
	make_page(MSC_SEND_QUEUE_S>>PAGE_SHIFT,  0xa02040, Srwe);	/* system send */
	make_page(MSC_BSEND_QUEUE_S>>PAGE_SHIFT, 0xa02640, Srwe);	/* system send broad */
	make_page(MSC_XYG_QUEUE_S>>PAGE_SHIFT,   0xa02600, Srwe);	/* system put broad */
	make_page(MSC_CXYG_QUEUE_S>>PAGE_SHIFT,  0xa02620, Srwe);	/* system put broad */

	/* Direct queue access entries for refilling the MSC send queue */
	make_page(MSC_SYSTEM_DIRECT>>PAGE_SHIFT,   0xa08000, Srwe);
	make_page(MSC_USER_DIRECT>>PAGE_SHIFT,     0xa08001, Srwe);
	make_page(MSC_REMOTE_DIRECT>>PAGE_SHIFT,   0xa08002, Srwe);
	make_page(MSC_REPLY_DIRECT>>PAGE_SHIFT,    0xa08003, Srwe);
	make_page(MSC_REMREPLY_DIRECT>>PAGE_SHIFT, 0xa08004, Srwe);

	/* As above with end-bit set */
	make_page(MSC_SYSTEM_DIRECT_END>>PAGE_SHIFT,   0xa0c000, Srwe);
	make_page(MSC_USER_DIRECT_END>>PAGE_SHIFT,     0xa0c001, Srwe);
	make_page(MSC_REMOTE_DIRECT_END>>PAGE_SHIFT,   0xa0c002, Srwe);
	make_page(MSC_REPLY_DIRECT_END>>PAGE_SHIFT,    0xa0c003, Srwe);
	make_page(MSC_REMREPLY_DIRECT_END>>PAGE_SHIFT, 0xa0c004, Srwe);
}

__initfunc(static void map_kernel(void))
{
	int phys;

	/* the AP+ only ever has one bank of memory starting at address 0 */
	ap_mem_size = sp_banks[0].num_bytes;
	for (phys = 0; phys < sp_banks[0].num_bytes; phys += APMMU_PGDIR_SIZE)
		make_large_page((KERNBASE + phys) >> 12,
				(phys >> 12),
				APMMU_CACHE | APMMU_PRIV | APMMU_VALID);
	init_mm.mmap->vm_start = page_offset = KERNBASE;
	stack_top = page_offset - PAGE_SIZE;
}

extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sparc_context_init(unsigned long, int);

extern int physmem_mapped_contig;
extern int linux_num_cpus;

__initfunc(unsigned long apmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
{
	int i;

	physmem_mapped_contig = 1;	/* for init.c:taint_real_pages() */

	num_contexts = AP_NUM_CONTEXTS;
	mempool = PAGE_ALIGN(start_mem);
	memset(swapper_pg_dir, 0, PAGE_SIZE);

	apmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
	mempool = PAGE_ALIGN(mempool);
	map_kernel();
	ap_setup_mappings();

	/* the MSC wants this aligned on a 16k boundary */
	apmmu_context_table =
		sparc_init_alloc(&mempool,
				 num_contexts*sizeof(ctxd_t) < 0x4000 ?
				 0x4000 :
				 num_contexts*sizeof(ctxd_t));
	apmmu_ctx_table_phys = (ctxd_t *) apmmu_v2p((unsigned long) apmmu_context_table);
	for (i = 0; i < num_contexts; i++)
		apmmu_ctxd_set(&apmmu_context_table[i], swapper_pg_dir);

	start_mem = PAGE_ALIGN(mempool);

	flush_cache_all();
	apmmu_set_ctable_ptr((unsigned long) apmmu_ctx_table_phys);
	flush_tlb_all();
	poke_viking();

	/* on the AP we don't put the top few contexts into the free
	   context list as these are reserved for parallel tasks */
	start_mem = sparc_context_init(start_mem, MPP_CONTEXT_BASE);
	start_mem = free_area_init(start_mem, end_mem);

	return PAGE_ALIGN(start_mem);
}

static int apmmu_mmu_info(char *buf)
{
	return sprintf(buf,
		       "MMU type\t: %s\n"
		       "invall\t\t: %d\n"
		       "invmm\t\t: %d\n"
		       "invrnge\t\t: %d\n"
		       "invpg\t\t: %d\n"
		       "contexts\t: %d\n",
		       apmmu_name,
		       module_stats.invall,
		       module_stats.invmm,
		       module_stats.invrnge,
		       module_stats.invpg,
		       num_contexts);
}

static void apmmu_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}

__initfunc(static void poke_viking(void))
{
	unsigned long mreg = apmmu_get_mmureg();

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg &= ~VIKING_ACENABLE;
	mreg &= ~VIKING_SBENABLE;
	mreg |= VIKING_TCENABLE;
	apmmu_set_mmureg(mreg);
}

__initfunc(static void init_viking(void))
{
	apmmu_name = "TI Viking/AP1000";

	BTFIXUPSET_CALL(flush_cache_all, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_cache_mm, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_cache_page, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_cache_range, apmmu_null_func, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_page_to_ram, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, apmmu_null_func, BTFIXUPCALL_NOP);
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

#define PATCH_BRANCH(insn, dest) do { \
		iaddr = &(insn); \
		daddr = &(dest); \
		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
	} while(0)
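
/* PATCH_BRANCH overwrites the instruction word at "insn" with a
 * PC-relative branch to "dest"; it is used below to point the window
 * trap handlers and the text/data fault vectors at the SRMMU-style
 * routines. */
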
__initfunc(static void patch_window_trap_handlers(void))
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}

/* Load up routines and constants for apmmu */
__initfunc(void ld_mmu_apmmu(void))
{
	/* First the constants */
	BTFIXUPSET_SIMM13(pmd_shift, APMMU_PMD_SHIFT);
	BTFIXUPSET_SETHI(pmd_size, APMMU_PMD_SIZE);
	BTFIXUPSET_SETHI(pmd_mask, APMMU_PMD_MASK);
	BTFIXUPSET_SIMM13(pgdir_shift, APMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, APMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, APMMU_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pte, APMMU_PTRS_PER_PTE);
	BTFIXUPSET_SIMM13(ptrs_per_pmd, APMMU_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, APMMU_PTRS_PER_PGD);

	BTFIXUPSET_INT(page_none, pgprot_val(APMMU_PAGE_NONE));
	BTFIXUPSET_INT(page_shared, pgprot_val(APMMU_PAGE_SHARED));
	BTFIXUPSET_INT(page_copy, pgprot_val(APMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(APMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(APMMU_PAGE_KERNEL));
	pg_iobits = APMMU_VALID | APMMU_WRITE | APMMU_REF;

	/* Functions */
	BTFIXUPSET_CALL(get_pte_fast, apmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(get_pmd_fast, apmmu_get_pmd_fast, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(get_pgd_fast, apmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
	BTFIXUPSET_CALL(free_pte_slow, apmmu_free_pte_slow, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(free_pmd_slow, apmmu_free_pmd_slow, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(free_pgd_slow, apmmu_free_pgd_slow, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(set_pte, apmmu_set_pte_cacheable, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(switch_to_context, apmmu_switch_to_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_page, apmmu_pte_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, apmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page, apmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_update_rootmmu_dir, apmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);

	BTFIXUPSET_SETHI(none_mask, 0xF0000000);

	BTFIXUPSET_CALL(pte_present, apmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, apmmu_pte_clear, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pmd_bad, apmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, apmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, apmmu_pmd_clear, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pgd_none, apmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, apmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, apmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, apmmu_pgd_clear, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mk_pte, apmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, apmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, apmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, apmmu_pgd_set, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, APMMU_CHG_MASK);
	BTFIXUPSET_CALL(pgd_offset, apmmu_pgd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_offset, apmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset, apmmu_pte_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free_kernel, apmmu_pte_free_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_free_kernel, apmmu_pmd_free_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_kernel, apmmu_pte_alloc_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_kernel, apmmu_pmd_alloc_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, apmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc, apmmu_pte_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_free, apmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc, apmmu_pmd_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_free, apmmu_pgd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_alloc, apmmu_pgd_alloc, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_flush, apmmu_pgd_flush, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, APMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, APMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, APMMU_REF);
	BTFIXUPSET_HALF(pte_wrprotecti, APMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, APMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, APMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, apmmu_pte_mkwrite, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_mkdirty, apmmu_pte_mkdirty, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_mkyoung, apmmu_pte_mkyoung, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(update_mmu_cache, apmmu_update_mmu_cache, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_lockarea, apmmu_lockarea, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unlockarea, apmmu_unlockarea, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_get_scsi_one, apmmu_null_func, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(mmu_release_scsi_one, apmmu_null_func, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, apmmu_null_func, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_info, apmmu_mmu_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_v2p, apmmu_v2p, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_p2v, apmmu_p2v, BTFIXUPCALL_NORM);

	/* Task struct and kernel stack allocating/freeing. */
	BTFIXUPSET_CALL(alloc_task_struct, apmmu_alloc_task_struct, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_task_struct, apmmu_free_task_struct, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(quick_kernel_fault, apmmu_quick_kernel_fault, BTFIXUPCALL_NORM);

	init_viking();
	patch_window_trap_handlers();
}