1 #ifndef _MOTOROLA_PGALLOC_H
2 #define _MOTOROLA_PGALLOC_H
/*
 * Quicklist bookkeeping for page-table allocation.  The single instance
 * ("quicklists") is defined elsewhere; this header only declares it.
 */
extern struct pgtable_cache_struct {
	unsigned long *pmd_cache;	/* head of the pmd (pointer-table) quicklist */
	unsigned long *pte_cache;	/* head of the pte quicklist */
	/* This counts in units of pointer tables, of which can be eight per page. */
	unsigned long pgtable_cache_sz;
} quicklists;
/* There is no separate pgd quicklist; the pgd cache is folded into the
 * pmd cache (see the dummy pgd routines below). */
#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist (quicklists.pmd_cache)
#define pte_quicklist (quicklists.pte_cache)
/* This isn't accurate because of fragmentation of allocated pages for
   pointer tables, but that should not be a problem. */
#define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
18 extern pte_t
*get_pte_slow(pmd_t
*pmd
, unsigned long offset
);
19 extern pmd_t
*get_pmd_slow(pgd_t
*pgd
, unsigned long offset
);
21 extern pmd_t
*get_pointer_table(void);
22 extern int free_pointer_table(pmd_t
*);
24 extern inline pte_t
*get_pte_fast(void)
30 pte_quicklist
= (unsigned long *)*ret
;
32 quicklists
.pgtable_cache_sz
-= 8;
37 extern inline void free_pte_fast(pte_t
*pte
)
39 *(unsigned long *)pte
= (unsigned long)pte_quicklist
;
40 pte_quicklist
= (unsigned long *)pte
;
41 quicklists
.pgtable_cache_sz
+= 8;
44 extern inline void free_pte_slow(pte_t
*pte
)
46 cache_page((unsigned long)pte
);
47 free_page((unsigned long) pte
);
50 extern inline pmd_t
*get_pmd_fast(void)
56 pmd_quicklist
= (unsigned long *)*ret
;
58 quicklists
.pgtable_cache_sz
--;
63 extern inline void free_pmd_fast(pmd_t
*pmd
)
65 *(unsigned long *)pmd
= (unsigned long)pmd_quicklist
;
66 pmd_quicklist
= (unsigned long *) pmd
;
67 quicklists
.pgtable_cache_sz
++;
70 extern inline int free_pmd_slow(pmd_t
*pmd
)
72 return free_pointer_table(pmd
);
75 /* The pgd cache is folded into the pmd cache, so these are dummy routines. */
76 extern inline pgd_t
*get_pgd_fast(void)
81 extern inline void free_pgd_fast(pgd_t
*pgd
)
85 extern inline void free_pgd_slow(pgd_t
*pgd
)
89 extern void __bad_pte(pmd_t
*pmd
);
90 extern void __bad_pmd(pgd_t
*pgd
);
92 extern inline void pte_free(pte_t
*pte
)
97 extern inline pte_t
*pte_alloc(pmd_t
*pmd
, unsigned long address
)
99 address
= (address
>> PAGE_SHIFT
) & (PTRS_PER_PTE
- 1);
100 if (pmd_none(*pmd
)) {
101 pte_t
*page
= get_pte_fast();
104 return get_pte_slow(pmd
, address
);
106 return page
+ address
;
112 return (pte_t
*)__pmd_page(*pmd
) + address
;
115 extern inline void pmd_free(pmd_t
*pmd
)
120 extern inline pmd_t
*pmd_alloc(pgd_t
*pgd
, unsigned long address
)
122 address
= (address
>> PMD_SHIFT
) & (PTRS_PER_PMD
- 1);
123 if (pgd_none(*pgd
)) {
124 pmd_t
*page
= get_pmd_fast();
127 return get_pmd_slow(pgd
, address
);
129 return page
+ address
;
135 return (pmd_t
*)__pgd_page(*pgd
) + address
;
138 extern inline void pte_free_kernel(pte_t
*pte
)
143 extern inline pte_t
*pte_alloc_kernel(pmd_t
*pmd
, unsigned long address
)
145 return pte_alloc(pmd
, address
);
148 extern inline void pmd_free_kernel(pmd_t
*pmd
)
153 extern inline pmd_t
*pmd_alloc_kernel(pgd_t
*pgd
, unsigned long address
)
155 return pmd_alloc(pgd
, address
);
158 extern inline void pgd_free(pgd_t
*pgd
)
160 free_pmd_fast((pmd_t
*)pgd
);
163 extern inline pgd_t
*pgd_alloc(void)
165 pgd_t
*pgd
= (pgd_t
*)get_pmd_fast();
167 pgd
= (pgd_t
*)get_pointer_table();
171 extern int do_check_pgt_cache(int, int);
173 extern inline void set_pgdir(unsigned long address
, pgd_t entry
)
179 * flush all user-space atc entries.
181 static inline void __flush_tlb(void)
183 if (CPU_IS_040_OR_060
)
184 __asm__
__volatile__(".chip 68040\n\t"
188 __asm__
__volatile__("pflush #0,#4");
/* Flush a single atc entry on '040/'060, where pflush takes an address. */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
199 static inline void __flush_tlb_one(unsigned long addr
)
201 if (CPU_IS_040_OR_060
)
202 __flush_tlb040_one(addr
);
204 __asm__
__volatile__("pflush #0,#4,(%0)" : : "a" (addr
));
207 #define flush_tlb() __flush_tlb()
210 * flush all atc entries (both kernel and user-space entries).
212 static inline void flush_tlb_all(void)
214 if (CPU_IS_040_OR_060
)
215 __asm__
__volatile__(".chip 68040\n\t"
219 __asm__
__volatile__("pflusha");
222 static inline void flush_tlb_mm(struct mm_struct
*mm
)
224 if (mm
== current
->mm
)
228 static inline void flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
230 if (vma
->vm_mm
== current
->mm
)
231 __flush_tlb_one(addr
);
234 static inline void flush_tlb_range(struct mm_struct
*mm
,
235 unsigned long start
, unsigned long end
)
237 if (mm
== current
->mm
)
241 extern inline void flush_tlb_kernel_page(unsigned long addr
)
243 if (CPU_IS_040_OR_060
) {
244 mm_segment_t old_fs
= get_fs();
246 __asm__
__volatile__(".chip 68040\n\t"
252 __asm__
__volatile__("pflush #4,#4,(%0)" : : "a" (addr
));
/* No-op on this architecture; parameters exist for interface
 * compatibility with the generic TLB API. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
260 #endif /* _MOTOROLA_PGALLOC_H */