#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */

#ifndef __ASSEMBLY__
/*
 * Cache of MMU context last used.
 *
 * The MMU "context" consists of two things:
 *   (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
 *   (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
 */
extern unsigned long mmu_context_cache;
#include <linux/config.h>
#include <asm/page.h>
/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;
#define SR_ASID_MASK			0xffffffffff00ffffULL
#define SR_ASID_SHIFT			16

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0
/* The ASID is an 8-bit value, so it can never be 0x100 */
#define MMU_NO_ASID			0x100
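/*
 * Illustrative sketch (added for clarity; the value is hypothetical and
 * the block below is never compiled): a context splits into its cycle
 * and ASID halves with the masks above.
 */
#if 0
	unsigned long ctx     = 0x00000305;			/* cycle 3, ASID 5 */
	unsigned long version = ctx & MMU_CONTEXT_VERSION_MASK;	/* 0x00000300 */
	unsigned long asid    = ctx & MMU_CONTEXT_ASID_MASK;	/* 0x05 */
#endif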
/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000
static __inline__ void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* We have exhausted the ASIDs of this version.
		   Flush the whole TLB and start a new cycle. */
		flush_tlb_all();
		/* We also have to flush all caches, as ASIDs are
		   used in the caches. */
		flush_cache_all();
		/* Fix the version if needed.
		   Note that we avoid version #0/ASID #0 so that a live
		   context can be distinguished from NO_CONTEXT. */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}
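/*
 * Worked example of the rollover above (an illustrative sketch; the
 * values are hypothetical and the block below is never compiled):
 */
#if 0
	mmu_context_cache = 0x000001ff;	/* last ASID of cycle 0x00000100 */
	get_new_mmu_context(mm);
	/* ++mmu_context_cache -> 0x00000200: the ASID bits are now zero,
	 * so flush_tlb_all() and flush_cache_all() run, and mm->context
	 * becomes 0x00000200 (cycle 0x00000200, ASID 0x00). Only a full
	 * 32-bit wrap to zero is special-cased, restarting the counter at
	 * MMU_CONTEXT_FIRST_VERSION so a live context never equals
	 * NO_CONTEXT. */
#endif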
/*
 * Get an MMU context if needed.
 */
static __inline__ void
get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/* Check whether this mm holds a context from an old cycle.
		   If so, get a new context with the current version. */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}
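/*
 * Worked example of the staleness check (an illustrative sketch;
 * hypothetical values, never compiled):
 */
#if 0
	mm->context       = 0x00000105;	/* stamped during cycle 0x00000100 */
	mmu_context_cache = 0x00000237;	/* counter now in cycle 0x00000200 */
	/* (0x00000105 ^ 0x00000237) & MMU_CONTEXT_VERSION_MASK == 0x00000300,
	 * which is non-zero, so the context is stale and
	 * get_new_mmu_context() assigns a fresh ASID from the current
	 * cycle. */
	get_mmu_context(mm);
#endif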
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;

	return 0;
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free the TLB entries */
	flush_tlb_mm(mm);
}
#endif	/* __ASSEMBLY__ */

/* Common defines */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff
#ifndef __ASSEMBLY__
/* This has to be a common function because the "next location to fill"
 * state is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif
static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	/* Read the status register (SR). */
	asm volatile ("getcon	" __SR ", %0\n\t"
		      : "=r" (sr));

	/* Extract the 8-bit ASID field. */
	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}
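/*
 * Worked example (an illustrative sketch with hypothetical SR contents;
 * never compiled):
 */
#if 0
	/* If the low bits of SR hold 0x00050000, then
	 *   (0x00050000 >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK == 0x05,
	 * so get_asid() returns 5. */
	unsigned long asid = get_asid();
#endif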
/* Set the ASID into SR */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));

	/* Clear the old ASID field and insert the new one. */
	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

	/*
	 * It is possible that this function may be inlined and so, to avoid
	 * the assembler reporting duplicate symbols, we make use of the gas
	 * trick of generating symbols using numerics and forward reference.
	 *
	 * Mechanically: an interim SR value with bit 28 set is installed
	 * first, the updated SR is staged in SSR, SPC is pointed at the
	 * local label 1 just past the RTE (with the low bit set so
	 * execution resumes in SHmedia mode), and the RTE then loads SR
	 * and PC together.
	 */
	asm volatile ("movi	1, %1\n\t"
		      "shlli	%1, 28, %1\n\t"
		      "or	%0, %1, %1\n\t"
		      "putcon	%1, " __SR "\n\t"
		      "putcon	%0, " __SSR "\n\t"
		      "movi	1f, %1\n\t"
		      "ori	%1, 1, %1\n\t"
		      "putcon	%1, " __SPC "\n\t"
		      "rte\n"
		      "1:\n\t"
		      : "=r" (sr), "=r" (pc) : "0" (sr));
}
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}
static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk)
{
	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
	}
}
#define deactivate_mm(tsk, mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev), (next), NULL)
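/*
 * Illustrative call flow (a sketch of how the pieces above compose;
 * old_mm/new_mm are hypothetical names and the block is never
 * compiled):
 */
#if 0
	activate_mm(old_mm, new_mm);
	/* -> switch_mm(old_mm, new_mm, NULL)
	 * -> mmu_pdtp_cache = new_mm->pgd; activate_context(new_mm)
	 * -> get_mmu_context(new_mm);
	 *    set_asid(new_mm->context & MMU_CONTEXT_ASID_MASK) */
#endif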
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif	/* __ASSEMBLY__ */

#endif /* __ASM_SH64_MMU_CONTEXT_H */