/* sun3_pgalloc.h --
 * reorganization around 2.3.39, routines moved from sun3_pgtable.h
 *
 * moved 1/26/2000 Sam Creasey
 */
7 #ifndef _SUN3_PGALLOC_H
8 #define _SUN3_PGALLOC_H
10 /* Pagetable caches. */
11 //todo: should implement for at least ptes. --m
12 #define pgd_quicklist ((unsigned long *) 0)
13 #define pmd_quicklist ((unsigned long *) 0)
14 #define pte_quicklist ((unsigned long *) 0)
15 #define pgtable_cache_size (0L)
17 /* Allocation and deallocation of various flavours of pagetables. */
18 extern inline int free_pmd_fast (pmd_t
*pmdp
) { return 0; }
19 extern inline int free_pmd_slow (pmd_t
*pmdp
) { return 0; }
20 extern inline pmd_t
*get_pmd_fast (void) { return (pmd_t
*) 0; }
//todo: implement the following properly.
#define get_pte_fast() ((pte_t *) 0)	/* no pte quicklist: always a miss */
#define get_pte_slow pte_alloc
#define free_pte_fast(pte)		/* nothing is cached, nothing to free */
#define free_pte_slow pte_free
/* FIXME - when we get this compiling */
/* erm, now that it's compiling, what do we do with it? */
/* sun3 pmd entries carry no protection bits, so the kernel-table
   marker is simply zero. */
#define _KERNPG_TABLE 0
32 extern inline void pte_free_kernel(pte_t
* pte
)
34 free_page((unsigned long) pte
);
/* printk format used when reporting a corrupt pmd entry; defined in
   the mm code elsewhere. */
extern const char bad_pmd_string[];
39 extern inline pte_t
* pte_alloc_kernel(pmd_t
* pmd
, unsigned long address
)
41 address
= (address
>> PAGE_SHIFT
) & (PTRS_PER_PTE
- 1);
43 pte_t
* page
= (pte_t
*) get_free_page(GFP_KERNEL
);
46 pmd_val(*pmd
) = _KERNPG_TABLE
+ __pa(page
);
47 return page
+ address
;
49 pmd_val(*pmd
) = _KERNPG_TABLE
+ __pa((unsigned long)BAD_PAGETABLE
);
52 free_page((unsigned long) page
);
55 printk(bad_pmd_string
, pmd_val(*pmd
));
56 printk("at kernel pgd off %08x\n", (unsigned int)pmd
);
57 pmd_val(*pmd
) = _KERNPG_TABLE
+ __pa((unsigned long)BAD_PAGETABLE
);
60 return (pte_t
*) __pmd_page(*pmd
) + address
;
64 * allocating and freeing a pmd is trivial: the 1-entry pmd is
65 * inside the pgd, so has no extra memory associated with it.
67 extern inline void pmd_free_kernel(pmd_t
* pmd
)
72 extern inline pmd_t
* pmd_alloc_kernel(pgd_t
* pgd
, unsigned long address
)
77 extern inline void pte_free(pte_t
* pte
)
79 free_page((unsigned long) pte
);
82 extern inline pte_t
* pte_alloc(pmd_t
* pmd
, unsigned long address
)
84 address
= (address
>> (PAGE_SHIFT
-2)) & 4*(PTRS_PER_PTE
- 1);
91 return (pte_t
*) (__pmd_page(*pmd
) + address
);
95 unsigned long page
= __get_free_page(GFP_KERNEL
);
100 memset((void *)page
, 0, PAGE_SIZE
);
101 // pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
102 pmd_val(*pmd
) = __pa(page
);
103 return (pte_t
*) (page
+ address
);
110 printk(bad_pmd_string
, pmd_val(*pmd
));
111 printk("in normal pgd offset %08x\n", (unsigned int)pmd
);
113 // pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(BAD_PAGETABLE);
114 pmd_val(*pmd
) = __pa(BAD_PAGETABLE
);
119 * allocating and freeing a pmd is trivial: the 1-entry pmd is
120 * inside the pgd, so has no extra memory associated with it.
122 extern inline void pmd_free(pmd_t
* pmd
)
127 extern inline pmd_t
* pmd_alloc(pgd_t
* pgd
, unsigned long address
)
129 return (pmd_t
*) pgd
;
132 extern inline void pgd_free(pgd_t
* pgd
)
134 free_page((unsigned long) pgd
);
137 extern inline pgd_t
* pgd_alloc(void)
141 new_pgd
= (pgd_t
*)get_free_page(GFP_KERNEL
);
142 memcpy(new_pgd
, swapper_pg_dir
, PAGE_SIZE
);
143 memset(new_pgd
, 0, (PAGE_OFFSET
>> PGDIR_SHIFT
));
/* FIXME: the sun3 doesn't have a page table cache!
   (but the motorola routine should just return 0) */
extern int do_check_pgt_cache(int low, int high);
152 extern inline void set_pgdir(unsigned long address
, pgd_t entry
)
156 /* Reserved PMEGs. */
157 extern char sun3_reserved_pmeg
[SUN3_PMEGS_NUM
];
158 extern unsigned long pmeg_vaddr
[SUN3_PMEGS_NUM
];
159 extern unsigned char pmeg_alloc
[SUN3_PMEGS_NUM
];
160 extern unsigned char pmeg_ctx
[SUN3_PMEGS_NUM
];
162 /* Flush all userspace mappings one by one... (why no flush command,
164 static inline void flush_tlb_all(void)
167 unsigned char ctx
, oldctx
;
169 oldctx
= sun3_get_context();
170 for(addr
= 0x00000000; addr
< TASK_SIZE
; addr
+= SUN3_PMEG_SIZE
) {
171 for(ctx
= 0; ctx
< 8; ctx
++) {
172 sun3_put_context(ctx
);
173 sun3_put_segmap(addr
, SUN3_INVALID_PMEG
);
177 sun3_put_context(oldctx
);
178 /* erase all of the userspace pmeg maps, we've clobbered them
180 for(addr
= 0; addr
< SUN3_INVALID_PMEG
; addr
++) {
181 if(pmeg_alloc
[addr
] == 1) {
182 pmeg_alloc
[addr
] = 0;
184 pmeg_vaddr
[addr
] = 0;
190 /* Clear user TLB entries within the context named in mm */
191 static inline void flush_tlb_mm (struct mm_struct
*mm
)
193 unsigned char oldctx
;
197 oldctx
= sun3_get_context();
198 sun3_put_context(mm
->context
);
200 for(i
= 0; i
< TASK_SIZE
; i
+= SUN3_PMEG_SIZE
) {
201 seg
= sun3_get_segmap(i
);
202 if(seg
== SUN3_INVALID_PMEG
)
205 sun3_put_segmap(i
, SUN3_INVALID_PMEG
);
211 sun3_put_context(oldctx
);
215 /* Flush a single TLB page. In this case, we're limited to flushing a
217 static inline void flush_tlb_page (struct vm_area_struct
*vma
,
220 unsigned char oldctx
;
223 oldctx
= sun3_get_context();
224 sun3_put_context(vma
->vm_mm
->context
);
225 addr
&= ~SUN3_PMEG_MASK
;
226 if((i
= sun3_get_segmap(addr
)) != SUN3_INVALID_PMEG
)
231 sun3_put_segmap (addr
, SUN3_INVALID_PMEG
);
233 sun3_put_context(oldctx
);
236 /* Flush a range of pages from TLB. */
238 static inline void flush_tlb_range (struct mm_struct
*mm
,
239 unsigned long start
, unsigned long end
)
241 unsigned char seg
, oldctx
;
243 start
&= ~SUN3_PMEG_MASK
;
245 oldctx
= sun3_get_context();
246 sun3_put_context(mm
->context
);
250 if((seg
= sun3_get_segmap(start
)) == SUN3_INVALID_PMEG
)
252 if(pmeg_ctx
[seg
] == mm
->context
) {
257 sun3_put_segmap(start
, SUN3_INVALID_PMEG
);
259 start
+= SUN3_PMEG_SIZE
;
263 /* Flush kernel page from TLB. */
264 static inline void flush_tlb_kernel_page (unsigned long addr
)
266 sun3_put_segmap (addr
& ~(SUN3_PMEG_SIZE
- 1), SUN3_INVALID_PMEG
);
269 extern inline void flush_tlb_pgtables(struct mm_struct
*mm
,
270 unsigned long start
, unsigned long end
)
274 #endif /* SUN3_PGALLOC_H */