/*
 * Provenance (from git web view, not part of the original source):
 *   davej-history.git / include/asm-m68k/sun3_pgalloc.h
 *   blob 32a810aa595b3a254538587b643add71b43b1fc6
 */
1 /* sun3_pgalloc.h --
2 * reorganization around 2.3.39, routines moved from sun3_pgtable.h
4 * moved 1/26/2000 Sam Creasey
5 */
7 #ifndef _SUN3_PGALLOC_H
8 #define _SUN3_PGALLOC_H
/* Pagetable caches.  The sun3 port keeps no quicklists at all: every
 * cache is hard-wired empty (NULL list heads, zero size), so generic
 * code that drains the caches sees nothing to do. */
//todo: should implement for at least ptes. --m
#define pgd_quicklist ((unsigned long *) 0)
#define pmd_quicklist ((unsigned long *) 0)
#define pte_quicklist ((unsigned long *) 0)
#define pgtable_cache_size (0L)
17 /* Allocation and deallocation of various flavours of pagetables. */
18 extern inline int free_pmd_fast (pmd_t *pmdp) { return 0; }
19 extern inline int free_pmd_slow (pmd_t *pmdp) { return 0; }
20 extern inline pmd_t *get_pmd_fast (void) { return (pmd_t *) 0; }
22 //todo: implement the following properly.
23 #define get_pte_fast() ((pte_t *) 0)
24 #define get_pte_slow pte_alloc
25 #define free_pte_fast(pte)
26 #define free_pte_slow pte_free
28 /* FIXME - when we get this compiling */
29 /* erm, now that it's compiling, what do we do with it? */
30 #define _KERNPG_TABLE 0
32 extern inline void pte_free_kernel(pte_t * pte)
34 free_page((unsigned long) pte);
37 extern const char bad_pmd_string[];
39 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
41 address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
42 if (pmd_none(*pmd)) {
43 pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
44 if (pmd_none(*pmd)) {
45 if (page) {
46 pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
47 return page + address;
49 pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
50 return NULL;
52 free_page((unsigned long) page);
54 if (pmd_bad(*pmd)) {
55 printk(bad_pmd_string, pmd_val(*pmd));
56 printk("at kernel pgd off %08x\n", (unsigned int)pmd);
57 pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
58 return NULL;
60 return (pte_t *) __pmd_page(*pmd) + address;
64 * allocating and freeing a pmd is trivial: the 1-entry pmd is
65 * inside the pgd, so has no extra memory associated with it.
67 extern inline void pmd_free_kernel(pmd_t * pmd)
69 // pmd_val(*pmd) = 0;
72 extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
74 return (pmd_t *) pgd;
77 extern inline void pte_free(pte_t * pte)
79 free_page((unsigned long) pte);
82 extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
84 address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);
86 repeat:
87 if (pmd_none(*pmd))
88 goto getnew;
89 if (pmd_bad(*pmd))
90 goto fix;
91 return (pte_t *) (__pmd_page(*pmd) + address);
93 getnew:
95 unsigned long page = __get_free_page(GFP_KERNEL);
96 if (!pmd_none(*pmd))
97 goto freenew;
98 if (!page)
99 goto oom;
100 memset((void *)page, 0, PAGE_SIZE);
101 // pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
102 pmd_val(*pmd) = __pa(page);
103 return (pte_t *) (page + address);
104 freenew:
105 free_page(page);
106 goto repeat;
109 fix:
110 printk(bad_pmd_string, pmd_val(*pmd));
111 printk("in normal pgd offset %08x\n", (unsigned int)pmd);
112 oom:
113 // pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(BAD_PAGETABLE);
114 pmd_val(*pmd) = __pa(BAD_PAGETABLE);
115 return NULL;
119 * allocating and freeing a pmd is trivial: the 1-entry pmd is
120 * inside the pgd, so has no extra memory associated with it.
122 extern inline void pmd_free(pmd_t * pmd)
124 pmd_val(*pmd) = 0;
127 extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
129 return (pmd_t *) pgd;
132 extern inline void pgd_free(pgd_t * pgd)
134 free_page((unsigned long) pgd);
137 extern inline pgd_t * pgd_alloc(void)
139 pgd_t *new_pgd;
141 new_pgd = (pgd_t *)get_free_page(GFP_KERNEL);
142 memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
143 memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
144 return new_pgd;
147 /* FIXME: the sun3 doesn't have a page table cache!
148 (but the motorola routine should just return 0) */
150 extern int do_check_pgt_cache(int, int);
152 extern inline void set_pgdir(unsigned long address, pgd_t entry)
156 /* Reserved PMEGs. */
157 extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
158 extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
159 extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
160 extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
162 /* Flush all userspace mappings one by one... (why no flush command,
163 sun?) */
164 static inline void flush_tlb_all(void)
166 unsigned long addr;
167 unsigned char ctx, oldctx;
169 oldctx = sun3_get_context();
170 for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
171 for(ctx = 0; ctx < 8; ctx++) {
172 sun3_put_context(ctx);
173 sun3_put_segmap(addr, SUN3_INVALID_PMEG);
177 sun3_put_context(oldctx);
178 /* erase all of the userspace pmeg maps, we've clobbered them
179 all anyway */
180 for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
181 if(pmeg_alloc[addr] == 1) {
182 pmeg_alloc[addr] = 0;
183 pmeg_ctx[addr] = 0;
184 pmeg_vaddr[addr] = 0;
190 /* Clear user TLB entries within the context named in mm */
191 static inline void flush_tlb_mm (struct mm_struct *mm)
193 unsigned char oldctx;
194 unsigned char seg;
195 unsigned long i;
197 oldctx = sun3_get_context();
198 sun3_put_context(mm->context);
200 for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
201 seg = sun3_get_segmap(i);
202 if(seg == SUN3_INVALID_PMEG)
203 continue;
205 sun3_put_segmap(i, SUN3_INVALID_PMEG);
206 pmeg_alloc[seg] = 0;
207 pmeg_ctx[seg] = 0;
208 pmeg_vaddr[seg] = 0;
211 sun3_put_context(oldctx);
215 /* Flush a single TLB page. In this case, we're limited to flushing a
216 single PMEG */
217 static inline void flush_tlb_page (struct vm_area_struct *vma,
218 unsigned long addr)
220 unsigned char oldctx;
221 unsigned char i;
223 oldctx = sun3_get_context();
224 sun3_put_context(vma->vm_mm->context);
225 addr &= ~SUN3_PMEG_MASK;
226 if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
228 pmeg_alloc[i] = 0;
229 pmeg_ctx[i] = 0;
230 pmeg_vaddr[i] = 0;
231 sun3_put_segmap (addr, SUN3_INVALID_PMEG);
233 sun3_put_context(oldctx);
236 /* Flush a range of pages from TLB. */
238 static inline void flush_tlb_range (struct mm_struct *mm,
239 unsigned long start, unsigned long end)
241 unsigned char seg, oldctx;
243 start &= ~SUN3_PMEG_MASK;
245 oldctx = sun3_get_context();
246 sun3_put_context(mm->context);
248 while(start < end)
250 if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
251 goto next;
252 if(pmeg_ctx[seg] == mm->context) {
253 pmeg_alloc[seg] = 0;
254 pmeg_ctx[seg] = 0;
255 pmeg_vaddr[seg] = 0;
257 sun3_put_segmap(start, SUN3_INVALID_PMEG);
258 next:
259 start += SUN3_PMEG_SIZE;
263 /* Flush kernel page from TLB. */
264 static inline void flush_tlb_kernel_page (unsigned long addr)
266 sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
269 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
270 unsigned long start, unsigned long end)
274 #endif /* SUN3_PGALLOC_H */