/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/stringify.h>

#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Linux was ported to Xtensa assuming all auto-refill ways in set 0
 * had the same properties (a very likely assumption).  Multiple sets
 * of auto-refill ways will still work properly, but not as optimally
 * as the Xtensa designer may have assumed.
 *
 * We make this case a hard #error, killing the kernel build, to alert
 * the developer to this condition (which is more likely an error).
 * You super-duper clever developers can change it to a warning or
 * remove it altogether if you think you know what you're doing.  :)
 */

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif

#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif

#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif

extern unsigned long asid_cache;
extern pgd_t *current_pgd;

/*
 * Define the number of entries per auto-refill way in set 0 of both I and D
 * TLBs.  We deal only with set 0 here (an assumption further explained in
 * assertions.h).  Also, define the total number of ARF entries in both TLBs.
 */

#define ITLB_ENTRIES_PER_ARF_WAY  (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY  (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))

#define ITLB_ENTRIES \
	(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES \
	(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))

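/*
 * For example, a hypothetical core whose set 0 has 4 auto-refill ways of
 * 4 entries each (in both the ITLB and the DTLB) would yield
 * ITLB_ENTRIES == DTLB_ENTRIES == 16; the actual values come from the
 * XCHAL_* parameters of the configured core.
 */
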
/*
 * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
 * In practice, they are probably equal.  This macro simplifies function
 * flush_tlb_range().
 */

#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES  DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES  ITLB_ENTRIES
#endif

/*
 * asid_cache tracks only the ASID[USER_RING] field of the RASID special
 * register, which is the current user-task asid allocation value.
 * mm->context has the same meaning.  When it comes time to write the
 * asid_cache or mm->context values to the RASID special register, we first
 * shift the value left by 8, then insert the value.
 * ASID[0] always contains the kernel's asid value, and we reserve three
 * other asid values that we never assign to user tasks.
 */

#define ASID_INC	0x1
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)

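/*
 * For example, with XCHAL_MMU_ASID_BITS == 8, ASID_MASK is 0xff and, for
 * the usual USER_RING == 1 layout, the user ASID lands in bits 15..8 of
 * RASID after the left-shift by 8 described above (see ASID_INSERT below).
 */
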
/*
 * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
 * indicating invalid address space.  XCHAL_MMU_ASID_KERNEL is a configurable
 * Xtensa processor constant indicating the kernel address space.  They can
 * be arbitrary values.
 *
 * We identify three more unique, reserved ASID values to use in the unused
 * ring positions.  No other user process will be assigned these reserved
 * ASID values.
 *
 * For example, given that
 *
 *	XCHAL_MMU_ASID_INVALID == 0
 *	XCHAL_MMU_ASID_KERNEL  == 1
 *
 * the following maze of #if statements would generate
 *
 *	ASID_RESERVED_1        == 2
 *	ASID_RESERVED_2        == 3
 *	ASID_RESERVED_3        == 4
 *	ASID_FIRST_NONRESERVED == 5
 */

#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1        ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1        ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2        ((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2        ((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3        ((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3        ((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif

#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
			    ((ASID_RESERVED_2) << 16) + \
			    ((ASID_RESERVED_3) <<  8) + \
			    ((XCHAL_MMU_ASID_KERNEL))   )

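/*
 * Continuing the example above (INVALID == 0, KERNEL == 1), this packs to
 * ASID_ALL_RESERVED == 0x02030401: a reserved or kernel ASID in every ring
 * position, none of which is ever handed to a user task.
 */
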
/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  NO_CONTEXT is a better mnemonic than
 * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
 */

#define NO_CONTEXT   XCHAL_MMU_ASID_INVALID

#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif

#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif

#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif

#if (USER_RING == 1)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			 ((ASID_RESERVED_2)   << 16) + \
			 (((x) & (ASID_MASK)) <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#elif (USER_RING == 2)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			 (((x) & (ASID_MASK)) << 16) + \
			 ((ASID_RESERVED_2)   <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#elif (USER_RING == 3)
#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
			 ((ASID_RESERVED_1)   << 16) + \
			 ((ASID_RESERVED_2)   <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#else
#error Goofy value for USER_RING

#endif /* USER_RING == 1 */

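/*
 * With the example values above (ASID_RESERVED_1 == 2, ASID_RESERVED_2 == 3,
 * XCHAL_MMU_ASID_KERNEL == 1) and USER_RING == 1, a user ASID of 0x2a packs
 * to ASID_INSERT(0x2a) == 0x02032a01, i.e. rings 3..0 get ASIDs 2, 3, 0x2a, 1.
 */
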
/*
 * All upper bits not used by the hardware are treated as a software
 * asid version extension.
 */

#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION \
	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)

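/*
 * With XCHAL_MMU_ASID_BITS == 8, ASID_VERSION_MASK is 0xffffff00 on a 32-bit
 * unsigned long: the low byte is the hardware ASID and everything above it
 * counts allocation generations.  With the example reserved values above,
 * ASID_FIRST_VERSION == 0x100 + 5 == 0x105.
 */
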
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}

#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		flush_tlb_all(); /* start new asid cycle */
		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
		asid += ASID_FIRST_NONRESERVED;
	}
	mm->context = asid_cache = asid;
}

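/*
 * Roll-over, using the example numbers above: starting from asid == 0x1ff
 * (version 0x100, hardware ASID 0xff), the increment gives 0x200, whose low
 * ASID_MASK bits are zero, so the TLB is flushed and the new allocation
 * becomes 0x205 (version 0x200, first non-reserved ASID 5).
 */
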
#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation

/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
   really the best, but if you insist... */

static inline int validate_asid (unsigned long asid)
{
	switch (asid) {
	case XCHAL_MMU_ASID_INVALID:
	case XCHAL_MMU_ASID_KERNEL:
	case ASID_RESERVED_1:
	case ASID_RESERVED_2:
	case ASID_RESERVED_3:
		return 0; /* can't use these values as ASIDs */
	}
	return 1; /* valid */
}

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	while (1) {
		asid += ASID_INC;
		if ( ! (asid & ASID_MASK) ) {
			flush_tlb_all(); /* start new asid cycle */
			if (!asid)      /* fix version if needed */
				asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
			asid += ASID_FIRST_NONRESERVED;
			break; /* no need to validate here */
		}
		if (validate_asid (asid & ASID_MASK))
			break;
	}
	mm->context = asid_cache = asid;
}

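/*
 * Unlike the fast path above, this variant has to step over the reserved
 * ASID values one at a time via validate_asid(), which is the extra cost
 * the #warning above refers to.
 */
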
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

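/*
 * NO_CONTEXT carries no version bits, so as long as asid_cache holds a value
 * with a non-zero version field (ASID_FIRST_VERSION and up), the version
 * comparison in switch_mm() below sees a mismatch for a fresh mm and its
 * first switch-in allocates a real ASID.
 */
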
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if ((next->context ^ asid) & ASID_VERSION_MASK)
		get_new_mmu_context(next, asid);

	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}

#define deactivate_mm(tsk, mm)	do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */

static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */

static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	get_new_mmu_context(next, asid_cache);
	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* _XTENSA_MMU_CONTEXT_H */