/* $Id: pgtsrmmu.h,v 1.29 1998/07/26 03:05:42 davem Exp $
 * pgtsrmmu.h: SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/asi.h>	/* for ASI_M_MMUREGS and ASI_M_FLUSH_PROBE below */
#include <asm/page.h>
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_SHIFT         18
#define SRMMU_PMD_SIZE          (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK          (~(SRMMU_PMD_SIZE-1))
#define SRMMU_PMD_ALIGN(addr)   (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT       24
#define SRMMU_PGDIR_SIZE        (1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_PTRS_PER_PTE      64
#define SRMMU_PTRS_PER_PMD      64
#define SRMMU_PTRS_PER_PGD      256
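
/* Added note (not in the original header): the shifts above carve a 32-bit
 * virtual address into 8 + 6 + 6 + 12 bits, matching SRMMU_PTRS_PER_* above
 * (256 PGD entries, 64 PMD entries, 64 PTE entries, 4K pages):
 *
 *	pgd index = vaddr >> SRMMU_PGDIR_SHIFT;				    bits 31..24
 *	pmd index = (vaddr >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);  bits 23..18
 *	pte index = (vaddr >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);	    bits 17..12
 *	offset    = vaddr & ~PAGE_MASK;					    bits 11..0
 */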
#define SRMMU_PTE_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PMD_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE    0x400 /* 256 entries, 4 bytes a piece */

#define SRMMU_VMALLOC_START     (0xfe300000)
#define SRMMU_VMALLOC_END       ~0x0UL

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK           0x3
#define SRMMU_ET_INVALID        0x0
#define SRMMU_ET_PTD            0x1
#define SRMMU_ET_PTE            0x2
#define SRMMU_ET_REPTE          0x3 /* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK         0xfffffff0
#define SRMMU_PTD_PMASK         0xfffffff0
#define SRMMU_PTE_PMASK         0xffffff00
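
/* Added sketch (not part of the original interface): recovering a physical
 * page address from a valid PTE.  The PPN field is the physical address
 * shifted right by 4; this assumes the frame sits in the low 4GB of the
 * 36-bit physical space, so the result fits in an unsigned long.
 * srmmu_pte_physpage() is a hypothetical helper, not part of this header.
 */
#ifndef __ASSEMBLY__
extern __inline__ unsigned long srmmu_pte_physpage(unsigned long pte)
{
	/* Mask off the non-page bits, then shift the PPN back up into
	 * a byte address.
	 */
	return (pte & SRMMU_PTE_PMASK) << 4;
}
#endif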
/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE             0x80
#define SRMMU_DIRTY             0x40
#define SRMMU_REF               0x20
#define SRMMU_EXEC              0x08
#define SRMMU_WRITE             0x04
#define SRMMU_VALID             0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV              0x1c
#define SRMMU_PRIV_RDONLY       0x18

#define SRMMU_CHG_MASK          (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
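
/* Added sketch (not part of the original interface): SRMMU_CHG_MASK keeps
 * the PPN plus the ref and dirty bits, so changing a page's protection is
 * a mask-and-or.  srmmu_pte_modify() is a hypothetical helper illustrating
 * the idiom; a real caller would pass the value of one of the SRMMU_PAGE_*
 * protections defined below.
 */
#ifndef __ASSEMBLY__
extern __inline__ unsigned long srmmu_pte_modify(unsigned long pte,
						 unsigned long newprot)
{
	/* Drop the old access bits, keep PPN/ref/dirty, OR in the new
	 * protection bits.
	 */
	return (pte & SRMMU_CHG_MASK) | newprot;
}
#endif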
/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
				    SRMMU_DIRTY | SRMMU_REF)
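
/* Added note (not in the original header): SRMMU_PAGE_COPY and
 * SRMMU_PAGE_RDONLY expand to the same bit pattern; the copy-on-write
 * distinction is tracked by the generic VM code, not in the hardware PTE.
 */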
/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG          0x00000000
#define SRMMU_CTXTBL_PTR        0x00000100
#define SRMMU_CTX_REG           0x00000200
#define SRMMU_FAULT_STATUS      0x00000300
#define SRMMU_FAULT_ADDR        0x00000400
#ifndef __ASSEMBLY__

/* Accessing the MMU control register. */
extern __inline__ unsigned int srmmu_get_mmureg(void)
{
	unsigned int retval;
	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ void srmmu_set_mmureg(unsigned long regval)
{
	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
{
	/* The register holds the context table's physical address shifted
	 * right by four, so pack it into that format first.
	 */
	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS) :
			     "memory");
}

extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS));
	/* Undo the packing done in srmmu_set_ctable_ptr(). */
	return (retval & SRMMU_CTX_PMASK) << 4;
}

extern __inline__ void srmmu_set_context(int context)
{
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (context), "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ int srmmu_get_context(void)
{
	register int retval;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ unsigned int srmmu_get_fstatus(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ unsigned int srmmu_get_faddr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
	return retval;
}

/* This is guaranteed on all SRMMU's. */
extern __inline__ void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (0x400),        /* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* These flush types are not available on all chips... */
extern __inline__ void srmmu_flush_tlb_ctx(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (0x300),        /* Flush TLB ctx.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
{
	addr &= SRMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (addr | 0x200), /* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
{
	addr &= SRMMU_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (addr | 0x100), /* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
{
	page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (page),         /* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	/* A probe returns the PTE that maps vaddr, or zero if there is
	 * no valid translation.
	 */
	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

	return retval;
}
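
/* Added usage sketch (not part of the original interface): probe for the
 * PTE mapping a virtual address and, when one exists, rebuild the full
 * physical address.  srmmu_vaddr_to_paddr() is a hypothetical helper and
 * assumes the frame sits in the low 4GB so the result fits in 32 bits.
 */
extern __inline__ unsigned long srmmu_vaddr_to_paddr(unsigned long vaddr)
{
	unsigned long pte = srmmu_hwprobe(vaddr);

	if ((pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
		return 0;	/* probe missed or hit a non-PTE entry */
	return ((pte & SRMMU_PTE_PMASK) << 4) | (vaddr & ~PAGE_MASK);
}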

extern __inline__ int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry):
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}

extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */