include/asm-sparc64/mmu_context.h
/* $Id: mmu_context.h,v 1.35 1999/05/08 03:03:20 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm/spinlock.h>

#define NO_CONTEXT	0

#ifndef __ASSEMBLY__

extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

#define CTX_VERSION_SHIFT	(PAGE_SHIFT - 3)
#define CTX_VERSION_MASK	((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION	((1UL << CTX_VERSION_SHIFT) + 1UL)
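
/* An mm's context value packs two things together: the low
 * CTX_VERSION_SHIFT bits (10 with 8K pages, matching the 0x3ff masks
 * used below) are the hardware context number and index into
 * mmu_context_bmap, while the bits above them are a generation
 * ("version") counter shared via tlb_context_cache.  Once an mm's
 * version no longer matches tlb_context_cache, it must be given a
 * fresh context by get_new_mmu_context().
 */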

extern void get_new_mmu_context(struct mm_struct *mm);

/* Initialize/destroy the context related info for a new mm_struct
 * instance.
 */
#define init_new_context(__mm)	((__mm)->context = NO_CONTEXT)

/* Kernel threads like rpciod and nfsd drop their mm and then use
 * init_mm; when this happens we must make sure that tsk->tss.ctx is
 * updated as well.  Otherwise we have disasters relating to
 * set_fs/get_fs usage later on.
 *
 * Also we can only clear the mmu_context_bmap bit when this is
 * the final reference to the address space.
 */
#define destroy_context(__mm)	do {						\
	if ((__mm)->context != NO_CONTEXT &&					\
	    atomic_read(&(__mm)->count) == 1) {					\
		if (!(((__mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))\
			clear_bit((__mm)->context & ~(CTX_VERSION_MASK),	\
				  mmu_context_bmap);				\
		(__mm)->context = NO_CONTEXT;					\
		if(current->mm == (__mm)) {					\
			current->tss.ctx = 0;					\
			spitfire_set_secondary_context(0);			\
			__asm__ __volatile__("flush %g6");			\
		}								\
	}									\
} while (0)
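
/* Note on the "flush %g6" sequences throughout this file: Spitfire
 * requires a FLUSH instruction after a store to an MMU register via
 * an internal ASI before the new value may be relied upon.  Any
 * mapped address works as the FLUSH operand; %g6 (which holds the
 * current task pointer in this kernel) is simply a register known to
 * contain one.
 */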

/* This routine must be called with interrupts off; this is necessary
 * to guarantee that the current->tss.ctx to CPU secondary context
 * register relationship is maintained when traps can happen.
 *
 * Also the caller must flush the current set of user windows
 * to the stack (if necessary) before we get here.
 */
extern __inline__ void __get_mmu_context(struct task_struct *tsk)
{
	register unsigned long paddr asm("o5");
	register unsigned long pgd_cache asm("o4");
	struct mm_struct *mm = tsk->mm;

	if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
	   !(tsk->flags & PF_EXITING)) {
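		/* Tasks that still own a user address space and are not
		 * exiting get a real secondary context below; kernel
		 * threads and exiting tasks take the else branch and run
		 * with secondary context 0.
		 */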
		unsigned long ctx = tlb_context_cache;
		if((mm->context ^ ctx) & CTX_VERSION_MASK)
			get_new_mmu_context(mm);
		if(!(mm->cpu_vm_mask & (1UL<<smp_processor_id()))) {
			spitfire_set_secondary_context(mm->context & 0x3ff);
			__asm__ __volatile__("flush %g6");
			spitfire_flush_dtlb_secondary_context();
			spitfire_flush_itlb_secondary_context();
			__asm__ __volatile__("flush %g6");
		}
		/* Don't worry, set_fs() will restore it... */
		/* Sigh, damned include loops... just poke seg directly. */
		tsk->tss.ctx = (tsk->tss.current_ds.seg ?
				(mm->context & 0x3ff) : 0);
	} else
		tsk->tss.ctx = 0;
	spitfire_set_secondary_context(tsk->tss.ctx);
	__asm__ __volatile__("flush %g6");
	paddr = __pa(mm->pgd);
	if((tsk->tss.flags & (SPARC_FLAG_32BIT|SPARC_FLAG_KTHREAD)) ==
	   (SPARC_FLAG_32BIT))
		pgd_cache = ((unsigned long) mm->pgd[0]) << 11UL;
	else
		pgd_cache = 0;
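	/* A sketch of the assembly below, as read from its constraints:
	 * with interrupts disabled (PSTATE_IE cleared) and the MMU-global
	 * register set selected (PSTATE_MG), %g7 is loaded with the
	 * physical address of the page table root and the pgd_cache hint
	 * is stored into the D-MMU TSB register (TSB_REG via ASI_DMMU);
	 * the saved %pstate is then restored.  The consumers of %g7 and
	 * the TSB register are presumably the TLB-miss handlers in the
	 * trap table, which live outside this header.
	 */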
	__asm__ __volatile__("
	rdpr		%%pstate, %%o2
	andn		%%o2, %2, %%o3
	wrpr		%%o3, %5, %%pstate
	mov		%4, %%g4
	mov		%0, %%g7
	stxa		%1, [%%g4] %3
	wrpr		%%o2, 0x0, %%pstate
"	: /* no outputs */
	: "r" (paddr), "r" (pgd_cache), "i" (PSTATE_IE),
	  "i" (ASI_DMMU), "i" (TSB_REG), "i" (PSTATE_MG)
	: "o2", "o3");
}

/* Now we define this as a do nothing macro, because the only
 * generic user right now is the scheduler, and we handle all
 * the atomicity issues by having switch_to() call the above
 * function itself.
 */
#define get_mmu_context(x) do { } while(0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.  Currently,
 * this is always called for 'current'; if that changes, put appropriate
 * checks here.
 *
 * We set the cpu_vm_mask first to zero to enforce a tlb flush for
 * the new context above, then we set it to the current cpu so the
 * smp tlb flush routines do not get confused.
 */
#define activate_context(__tsk)					\
do {	flushw_user();						\
	(__tsk)->mm->cpu_vm_mask = 0;				\
	__get_mmu_context(__tsk);				\
	(__tsk)->mm->cpu_vm_mask = (1UL<<smp_processor_id());	\
} while(0)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */