MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
/*
 *  linux/include/asm-arm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#include <linux/config.h>
#include <asm/glue.h>
#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)

#if 1 /* added by Victor Yu, 06-08-2005 */
#define TLB_DINVAL	(1 << 28)
#define TLB_BTB		(1 << 29)
#endif
/*
 * MMU TLB Model
 * =============
 *
 * We have the following to choose from:
 *   v3    - ARMv3
 *   v4    - ARMv4 without write buffer
 *   v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *   v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *   v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 */
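
/*
 * Illustration (a sketch, not part of the original header): if the kernel
 * is configured with exactly one TLB model, say CONFIG_CPU_TLB_V4WB, the
 * blocks below leave MULTI_TLB undefined and define _TLB to that model's
 * name, so the __glue() macros further down resolve at compile time:
 */
#if 0	/* illustrative only, never compiled */
#define _TLB v4wb
/* __glue(_TLB,_flush_kern_tlb_range) then expands to
 * v4wb_flush_kern_tlb_range, i.e. a direct call into
 * arch/arm/mm/tlb-v4wb.S */
#endif
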
#undef _TLB
#undef MULTI_TLB

#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif
#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif
#if 1 /* added by Victor Yu, 06-08-2005 */
#ifdef CONFIG_CPU_FA_BTB
#define __TLB_BTB	TLB_BTB
#else
#define __TLB_BTB	0
#endif

#ifdef CONFIG_CPU_FA_WB_DISABLE
#define __TLB_WB	0
#else
#define __TLB_WB	TLB_WB
#endif

#ifdef CONFIG_CPU_FA520
#define __TLB_DINVAL	TLB_DINVAL
#elif defined(CONFIG_CPU_FA526)
#define __TLB_DINVAL	0
#else
#define __TLB_DINVAL	0
#endif

#define fa_tlb_flags	(__TLB_WB | __TLB_BTB | __TLB_DINVAL | TLB_DCLEAN | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif
#endif
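
/*
 * Worked example (a sketch under an assumed config): on a FA526 with
 * CONFIG_CPU_FA_BTB enabled and CONFIG_CPU_FA_WB_DISABLE left unset, the
 * conditionals above pick __TLB_BTB = TLB_BTB, __TLB_WB = TLB_WB and
 * __TLB_DINVAL = 0, so fa_tlb_flags reduces to:
 */
#if 0	/* illustrative only, never compiled */
#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)
#endif
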
#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif
#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif
#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
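
/*
 * Example (a sketch): both calling methods present the same macro names.
 * example_flush() below is hypothetical and only shows what the macros
 * resolve to in each configuration.
 */
#if 0	/* illustrative only, never compiled */
static void example_flush(unsigned long start, unsigned long end)
{
	/* Single TLB model: __cpu_flush_kern_tlb_range is the literal
	 * symbol __glue(_TLB,_flush_kern_tlb_range), e.g.
	 * v4wb_flush_kern_tlb_range, called directly. */
	__cpu_flush_kern_tlb_range(start, end);

	/* MULTI_TLB: the same macro becomes an indirect call through
	 * the cpu_tlb function table. */
	cpu_tlb.flush_kern_range(start, end);
}
#endif
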
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,uaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- uaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
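
/*
 * Usage sketch (hypothetical caller, for illustration only): invalidating
 * one user page after its PTE changes, and a kernel virtual range after a
 * vmalloc-style mapping is torn down:
 */
#if 0	/* illustrative only, never compiled */
static void example_usage(struct vm_area_struct *vma, unsigned long uaddr,
			  unsigned long kstart, unsigned long kend)
{
	flush_tlb_page(vma, uaddr);		/* one user-space page  */
	flush_tlb_kernel_range(kstart, kend);	/* kernel address range */
}
#endif
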
/*
 *	We optimise the code below by:
 *	- building a set of TLB flags that might be set in __cpu_tlb_flags
 *	- building a set of TLB flags that will always be set in __cpu_tlb_flags
 *	- if we're going to need __cpu_tlb_flags, access it once and only once
 *
 *	This allows us to build optimal assembly for the single-CPU type case,
 *	and as close to optimal as the compiler constraints allow for the
 *	multi-CPU case.  We could do better for the multi-CPU case if the
 *	compiler implemented the "%?" method, but this has been discontinued
 *	due to too many people getting it wrong.
 */
#if 1 /* masked by Victor Yu, 06-08-2005 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 v4wb_possible_flags | \
				 v6wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 v4wb_always_flags & \
				 v6wbi_always_flags)
#else /* added by Victor Yu, 06-08-2005 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags)
#endif
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
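
/*
 * Example (a sketch): for a single-model v4wb kernel, always_tlb_flags
 * contains TLB_WB, so tlb_flag(TLB_WB) folds to a compile-time constant 1
 * while tlb_flag(TLB_V6_I_PAGE) folds to 0; the compiler keeps the first
 * branch unconditionally, discards the second, and never loads __tlb_flag
 * at all.  example_fold() is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static inline void example_fold(void)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))		/* constant true: always emitted */
		/* ... drain write buffer ... */;
	if (tlb_flag(TLB_V6_I_PAGE))	/* constant false: code dropped  */
		/* ... never emitted ... */;
}
#endif
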
static inline void flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (mm == current->active_mm) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (vma->vm_mm == current->active_mm) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
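
/*
 * Usage sketch (hypothetical helper, mirroring how set_pmd() in
 * asm-arm/pgtable.h uses this): write the entry into the page table
 * first, then push it out so the hardware table walker sees it:
 */
#if 0	/* illustrative only, never compiled */
static void example_set_pmd(pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;		/* update the in-memory page table      */
	flush_pmd_entry(pmdp);	/* clean the D-cache line, drain the WB */
}
#endif
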
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd));
	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4	@ flush_pmd"
			: : "r" (zero));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
#if 0 /* added by Victor Yu, 06-08-2005 */
	const unsigned int zero = 0;
#endif
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd));

#if 0 /* added by Victor Yu, 06-08-2005 */
	if (tlb_flag(TLB_BTB)) {
		asm("mcr%? p15, 0, %0, c7, c5, 6" : : "r" (zero));
		asm("mov r0, r0" : : );
		asm("mov r0, r0" : : );
	}
#endif
}
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
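
/*
 * Note the argument reordering (sketch, hypothetical caller): the generic
 * VM uses the (vma, start, end) order, while the tlb-*.S entry points
 * take (start, end, vma):
 */
#if 0	/* illustrative only, never compiled */
static void example_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	flush_tlb_range(vma, start, end);
	/* ... expands to: __cpu_flush_user_tlb_range(start, end, vma); */
}
#endif
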
/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
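
/*
 * Context sketch (hypothetical call site): the generic VM invokes
 * update_mmu_cache() after installing a PTE, giving the architecture a
 * chance to write back any dirty cache lines for the page:
 */
#if 0	/* illustrative only, never compiled */
static void example_fault_finish(struct vm_area_struct *vma,
				 unsigned long addr, pte_t pte)
{
	/* called after a PTE has been installed for this address */
	update_mmu_cache(vma, addr, pte);
}
#endif
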
/*
 * ARM processors do not cache TLB tables in RAM.
 */
#define flush_tlb_pgtables(mm,start,end)	do { } while (0)

#endif

#endif