/* $Id: spitfire.h,v 1.9 1998/04/28 08:23:33 davem Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000
#define TLB_SFSR		0x0000000000000018
#define TSB_REG			0x0000000000000028
#define TLB_TAG_ACCESS		0x0000000000000030

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
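
/* Note (not in the original header): the values above are virtual-address
 * offsets within the internal MMU register ASI spaces; they are read and
 * written with ldxa/stxa on ASI_DMMU/ASI_IMMU, as the inline accessors
 * below illustrate.
 */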

#ifndef __ASSEMBLY__

extern __inline__ unsigned long spitfire_get_isfsr(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ret)
			     : "r" (TLB_SFSR), "i" (ASI_IMMU));
	return ret;
}

extern __inline__ unsigned long spitfire_get_dsfsr(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ret)
			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
	return ret;
}

extern __inline__ unsigned long spitfire_get_sfar(void)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ret)
			     : "r" (DMMU_SFAR), "i" (ASI_DMMU));
	return ret;
}

extern __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa	%0, [%1] %2" :
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}

extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
	__asm__ __volatile__("stxa	%0, [%1] %2" :
			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}

extern __inline__ unsigned long spitfire_get_primary_context(void)
{
	unsigned long ctx;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ctx)
			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
	return ctx;
}
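
/* Note (assumption, based on the UltraSPARC manuals, not stated in the
 * original header): a store to an MMU register via ASI_DMMU only takes
 * effect reliably once a MEMBAR #Sync (or a flush) has completed, which
 * is why the context-setting helpers below end with membar("#Sync").
 */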

extern __inline__ void spitfire_set_primary_context(unsigned long ctx)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (ctx & 0x3ff),
			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
	membar("#Sync");
}

extern __inline__ unsigned long spitfire_get_secondary_context(void)
{
	unsigned long ctx;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ctx)
			     : "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
	return ctx;
}

extern __inline__ void spitfire_set_secondary_context(unsigned long ctx)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (ctx & 0x3ff),
			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
	membar("#Sync");
}

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but for stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}

extern __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
	return data;
}

extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

extern __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
	return data;
}

extern __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

/* Spitfire hardware assisted TLB flushes. */
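
/* Assumed encoding (per the UltraSPARC-I/II manuals, not spelled out in
 * the original header): the virtual address written to ASI_*MMU_DEMAP
 * selects the operation.  Bit 6 chooses "demap context" (1) versus
 * "demap page" (0), and bits <5:4> select the context: 0 = primary,
 * 1 = secondary, 2 = nucleus.  Hence 0x40/0x50/0x60 below for the
 * context-level flushes, and page | 0x00/0x10/0x20 for the page-level
 * ones.
 */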

/* Context level flushes. */
extern __inline__ void spitfire_flush_dtlb_primary_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_primary_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}

extern __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_secondary_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}

extern __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}

/* Page level flushes. */
extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page), "i" (ASI_IMMU_DEMAP));
}

extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}

extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */