allow coexistence of N build and AC build.
[tomato.git] / release / src-rt-6.x / linux / linux-2.6 / include / asm-xtensa / tlbflush.h
/*
 * include/asm-xtensa/tlbflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H

#ifdef __KERNEL__

#include <linux/stringify.h>
#include <asm/processor.h>

#define DTLB_WAY_PGD	7

#define ITLB_ARF_WAYS	4
#define DTLB_ARF_WAYS	4

#define ITLB_HIT_BIT	3
#define DTLB_HIT_BIT	4

#ifndef __ASSEMBLY__
/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(mm, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#define flush_tlb_kernel_range(start, end) flush_tlb_all()
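/*
 * Illustrative sketch, not part of the original header: how a caller might
 * choose between the interfaces above after changing the mappings of a VMA.
 * The helper name and the single-page shortcut are assumptions for
 * illustration; <asm/page.h> is assumed to be available for PAGE_SIZE.
 */
static inline void example_flush_user_mapping(struct vm_area_struct *vma,
					      unsigned long start,
					      unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		flush_tlb_page(vma, start);		/* one page is enough */
	else
		flush_tlb_range(vma, start, end);	/* flush the whole span */
}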
/* This is called in munmap when we have freed up some page-table pages.
 * We don't need to do anything here, there's nothing special about our
 * page-table pages.
 */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
/* TLB operations. */

static inline unsigned long itlb_probe(unsigned long addr)
{
	unsigned long tmp;
	__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
	return tmp;
}

static inline unsigned long dtlb_probe(unsigned long addr)
{
	unsigned long tmp;
	__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
	return tmp;
}
static inline void invalidate_itlb_entry (unsigned long probe)
{
	__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
}

static inline void invalidate_dtlb_entry (unsigned long probe)
{
	__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
}
/* Use the .._no_isync functions with caution.  Generally, these are
 * handy for bulk invalidates followed by a single 'isync'.  The
 * caller must follow up with an 'isync', which can be relatively
 * expensive on some Xtensa implementations.
 */
static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'. */
	__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
}

static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
	/* Caller must follow up with 'isync'. */
	__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
}
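/*
 * Illustrative sketch, not part of the original header: the bulk-invalidate
 * pattern described above.  Every entry of every auto-refill DTLB way is
 * dropped with the _no_isync variant and a single isync closes the batch.
 * The entry encoding (way number in the low bits, entry index shifted by
 * PAGE_SHIFT) and the per-way entry count are assumptions for illustration;
 * <asm/page.h> is assumed to be available for PAGE_SHIFT.
 */
static inline void example_flush_dtlb_arf_ways(unsigned int entries_per_way)
{
	unsigned int way, index;

	for (way = 0; way < DTLB_ARF_WAYS; way++)
		for (index = 0; index < entries_per_way; index++)
			invalidate_dtlb_entry_no_isync(way + (index << PAGE_SHIFT));

	/* One synchronization covers the whole batch. */
	__asm__ __volatile__("isync\n\t");
}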
static inline void set_itlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
			     : : "a" (val));
}

static inline void set_dtlbcfg_register (unsigned long val)
{
	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
			     : : "a" (val));
}

static inline void set_ptevaddr_register (unsigned long val)
{
	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
			     : : "a" (val));
}

static inline unsigned long read_ptevaddr_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
	return tmp;
}
static inline void write_dtlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
			     : : "r" (way), "r" (entry) );
}

static inline void write_itlb_entry (pte_t entry, int way)
{
	__asm__ __volatile__("witlb %1, %0; isync\n\t"
			     : : "r" (way), "r" (entry) );
}

static inline void invalidate_page_directory (void)
{
	invalidate_dtlb_entry (DTLB_WAY_PGD);
	invalidate_dtlb_entry (DTLB_WAY_PGD+1);
	invalidate_dtlb_entry (DTLB_WAY_PGD+2);
}
static inline void invalidate_itlb_mapping (unsigned address)
{
	unsigned long tlb_entry;
	if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
		invalidate_itlb_entry(tlb_entry);
}

static inline void invalidate_dtlb_mapping (unsigned address)
{
	unsigned long tlb_entry;
	if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
		invalidate_dtlb_entry(tlb_entry);
}

#define check_pgt_cache()	do { } while (0)
/*
 * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
 * ISA and exist only for test purposes.
 * You may find them helpful for MMU debugging, however.
 *
 * 'at' is the unmodified input register
 * 'as' is the output register, as follows (specific to the Linux config):
 *
 *	as[31..12] contain the virtual address
 *	as[11..08] are meaningless
 *	as[07..00] contain the asid
 */
static inline unsigned long read_dtlb_virtual (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_dtlb_translation (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_itlb_virtual (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}

static inline unsigned long read_itlb_translation (int way)
{
	unsigned long tmp;
	__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
	return tmp;
}
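/*
 * Illustrative sketch, not part of the original header: a debugging helper
 * built on the test-only readers above.  It decodes the fields the comment
 * describes (bits 31..12 = virtual address, bits 7..0 = ASID) for one DTLB
 * way.  The helper name and the printk() format are assumptions, and
 * <linux/kernel.h> is assumed to be available for printk().
 */
static inline void example_dump_dtlb_way(int way)
{
	unsigned long vaddr = read_dtlb_virtual(way);
	unsigned long xlat  = read_dtlb_translation(way);

	printk("dtlb way %d: va 0x%08lx asid 0x%02lx -> 0x%08lx\n",
	       way, vaddr & 0xfffff000, vaddr & 0xff, xlat);
}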
#endif	/* __ASSEMBLY__ */

#endif	/* __KERNEL__ */

#endif	/* _XTENSA_TLBFLUSH_H */