/*
 * Recovered from davej-history.git: include/asm-sparc64/spitfire.h
 * (blob f24ff8415fca1efaa0880771cd324ef234928d16)
 */
/* $Id: spitfire.h,v 1.10 2000/10/06 13:10:29 anton Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H
#include <asm/asi.h>
/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is there is a distinct and unique copy of
 * each these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000
#define TLB_SFSR		0x0000000000000018
#define TSB_REG			0x0000000000000028
#define TLB_TAG_ACCESS		0x0000000000000030
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
#ifndef __ASSEMBLY__
32 extern __inline__ unsigned long spitfire_get_isfsr(void)
34 unsigned long ret;
36 __asm__ __volatile__("ldxa [%1] %2, %0"
37 : "=r" (ret)
38 : "r" (TLB_SFSR), "i" (ASI_IMMU));
39 return ret;
42 extern __inline__ unsigned long spitfire_get_dsfsr(void)
44 unsigned long ret;
46 __asm__ __volatile__("ldxa [%1] %2, %0"
47 : "=r" (ret)
48 : "r" (TLB_SFSR), "i" (ASI_DMMU));
49 return ret;
52 extern __inline__ unsigned long spitfire_get_sfar(void)
54 unsigned long ret;
56 __asm__ __volatile__("ldxa [%1] %2, %0"
57 : "=r" (ret)
58 : "r" (DMMU_SFAR), "i" (ASI_DMMU));
59 return ret;
62 extern __inline__ void spitfire_put_isfsr(unsigned long sfsr)
64 __asm__ __volatile__("stxa %0, [%1] %2" :
65 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
68 extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
70 __asm__ __volatile__("stxa %0, [%1] %2" :
71 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
74 extern __inline__ unsigned long spitfire_get_primary_context(void)
76 unsigned long ctx;
78 __asm__ __volatile__("ldxa [%1] %2, %0"
79 : "=r" (ctx)
80 : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
81 return ctx;
84 extern __inline__ void spitfire_set_primary_context(unsigned long ctx)
86 __asm__ __volatile__("stxa %0, [%1] %2"
87 : /* No outputs */
88 : "r" (ctx & 0x3ff),
89 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
90 membar("#Sync");
93 extern __inline__ unsigned long spitfire_get_secondary_context(void)
95 unsigned long ctx;
97 __asm__ __volatile__("ldxa [%1] %2, %0"
98 : "=r" (ctx)
99 : "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
100 return ctx;
103 extern __inline__ void spitfire_set_secondary_context(unsigned long ctx)
105 __asm__ __volatile__("stxa %0, [%1] %2"
106 : /* No outputs */
107 : "r" (ctx & 0x3ff),
108 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
109 membar("#Sync");
/* The data cache is write through, so this just invalidates the
 * specified line.
 */
115 extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
117 __asm__ __volatile__("stxa %0, [%1] %2"
118 : /* No outputs */
119 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
120 membar("#Sync");
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
129 extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
131 __asm__ __volatile__("stxa %0, [%1] %2"
132 : /* No outputs */
133 : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
136 extern __inline__ unsigned long spitfire_get_dtlb_data(int entry)
138 unsigned long data;
140 __asm__ __volatile__("ldxa [%1] %2, %0"
141 : "=r" (data)
142 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
143 return data;
146 extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
148 unsigned long tag;
150 __asm__ __volatile__("ldxa [%1] %2, %0"
151 : "=r" (tag)
152 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
153 return tag;
156 extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
158 __asm__ __volatile__("stxa %0, [%1] %2"
159 : /* No outputs */
160 : "r" (data), "r" (entry << 3),
161 "i" (ASI_DTLB_DATA_ACCESS));
164 extern __inline__ unsigned long spitfire_get_itlb_data(int entry)
166 unsigned long data;
168 __asm__ __volatile__("ldxa [%1] %2, %0"
169 : "=r" (data)
170 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
171 return data;
174 extern __inline__ unsigned long spitfire_get_itlb_tag(int entry)
176 unsigned long tag;
178 __asm__ __volatile__("ldxa [%1] %2, %0"
179 : "=r" (tag)
180 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
181 return tag;
184 extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
186 __asm__ __volatile__("stxa %0, [%1] %2"
187 : /* No outputs */
188 : "r" (data), "r" (entry << 3),
189 "i" (ASI_ITLB_DATA_ACCESS));
/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes. */
195 extern __inline__ void spitfire_flush_dtlb_primary_context(void)
197 __asm__ __volatile__("stxa %%g0, [%0] %1"
198 : /* No outputs */
199 : "r" (0x40), "i" (ASI_DMMU_DEMAP));
202 extern __inline__ void spitfire_flush_itlb_primary_context(void)
204 __asm__ __volatile__("stxa %%g0, [%0] %1"
205 : /* No outputs */
206 : "r" (0x40), "i" (ASI_IMMU_DEMAP));
209 extern __inline__ void spitfire_flush_dtlb_secondary_context(void)
211 __asm__ __volatile__("stxa %%g0, [%0] %1"
212 : /* No outputs */
213 : "r" (0x50), "i" (ASI_DMMU_DEMAP));
216 extern __inline__ void spitfire_flush_itlb_secondary_context(void)
218 __asm__ __volatile__("stxa %%g0, [%0] %1"
219 : /* No outputs */
220 : "r" (0x50), "i" (ASI_IMMU_DEMAP));
223 extern __inline__ void spitfire_flush_dtlb_nucleus_context(void)
225 __asm__ __volatile__("stxa %%g0, [%0] %1"
226 : /* No outputs */
227 : "r" (0x60), "i" (ASI_DMMU_DEMAP));
230 extern __inline__ void spitfire_flush_itlb_nucleus_context(void)
232 __asm__ __volatile__("stxa %%g0, [%0] %1"
233 : /* No outputs */
234 : "r" (0x60), "i" (ASI_IMMU_DEMAP));
/* Page level flushes. */
238 extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
240 __asm__ __volatile__("stxa %%g0, [%0] %1"
241 : /* No outputs */
242 : "r" (page), "i" (ASI_DMMU_DEMAP));
245 extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
247 __asm__ __volatile__("stxa %%g0, [%0] %1"
248 : /* No outputs */
249 : "r" (page), "i" (ASI_IMMU_DEMAP));
252 extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
254 __asm__ __volatile__("stxa %%g0, [%0] %1"
255 : /* No outputs */
256 : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
259 extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
261 __asm__ __volatile__("stxa %%g0, [%0] %1"
262 : /* No outputs */
263 : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
266 extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
268 __asm__ __volatile__("stxa %%g0, [%0] %1"
269 : /* No outputs */
270 : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
273 extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
275 __asm__ __volatile__("stxa %%g0, [%0] %1"
276 : /* No outputs */
277 : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */