- Peter Anvin: more P4 configuration parsing
[davej-history.git] / include / asm-m68k / motorola_pgalloc.h
blob9257aeb439ea194fd40da13459400ef5b0130a40
1 #ifndef _MOTOROLA_PGALLOC_H
2 #define _MOTOROLA_PGALLOC_H
4 extern struct pgtable_cache_struct {
5 unsigned long *pmd_cache;
6 unsigned long *pte_cache;
7 /* This counts in units of pointer tables, of which can be eight per page. */
8 unsigned long pgtable_cache_sz;
9 } quicklists;
11 #define pgd_quicklist ((unsigned long *)0)
12 #define pmd_quicklist (quicklists.pmd_cache)
13 #define pte_quicklist (quicklists.pte_cache)
14 /* This isn't accurate because of fragmentation of allocated pages for
15 pointer tables, but that should not be a problem. */
16 #define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
18 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
19 extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);
21 extern pmd_t *get_pointer_table(void);
22 extern int free_pointer_table(pmd_t *);
24 extern inline pte_t *get_pte_fast(void)
26 unsigned long *ret;
28 ret = pte_quicklist;
29 if (ret) {
30 pte_quicklist = (unsigned long *)*ret;
31 ret[0] = 0;
32 quicklists.pgtable_cache_sz -= 8;
34 return (pte_t *)ret;
37 extern inline void free_pte_fast(pte_t *pte)
39 *(unsigned long *)pte = (unsigned long)pte_quicklist;
40 pte_quicklist = (unsigned long *)pte;
41 quicklists.pgtable_cache_sz += 8;
44 extern inline void free_pte_slow(pte_t *pte)
46 cache_page((unsigned long)pte);
47 free_page((unsigned long) pte);
50 extern inline pmd_t *get_pmd_fast(void)
52 unsigned long *ret;
54 ret = pmd_quicklist;
55 if (ret) {
56 pmd_quicklist = (unsigned long *)*ret;
57 ret[0] = 0;
58 quicklists.pgtable_cache_sz--;
60 return (pmd_t *)ret;
63 extern inline void free_pmd_fast(pmd_t *pmd)
65 *(unsigned long *)pmd = (unsigned long)pmd_quicklist;
66 pmd_quicklist = (unsigned long *) pmd;
67 quicklists.pgtable_cache_sz++;
70 extern inline int free_pmd_slow(pmd_t *pmd)
72 return free_pointer_table(pmd);
75 /* The pgd cache is folded into the pmd cache, so these are dummy routines. */
76 extern inline pgd_t *get_pgd_fast(void)
78 return (pgd_t *)0;
81 extern inline void free_pgd_fast(pgd_t *pgd)
85 extern inline void free_pgd_slow(pgd_t *pgd)
89 extern void __bad_pte(pmd_t *pmd);
90 extern void __bad_pmd(pgd_t *pgd);
92 extern inline void pte_free(pte_t *pte)
94 free_pte_fast(pte);
97 extern inline pte_t *pte_alloc(pmd_t *pmd, unsigned long address)
99 address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
100 if (pmd_none(*pmd)) {
101 pte_t *page = get_pte_fast();
103 if (!page)
104 return get_pte_slow(pmd, address);
105 pmd_set(pmd,page);
106 return page + address;
108 if (pmd_bad(*pmd)) {
109 __bad_pte(pmd);
110 return NULL;
112 return (pte_t *)__pmd_page(*pmd) + address;
115 extern inline void pmd_free(pmd_t *pmd)
117 free_pmd_fast(pmd);
120 extern inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
122 address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
123 if (pgd_none(*pgd)) {
124 pmd_t *page = get_pmd_fast();
126 if (!page)
127 return get_pmd_slow(pgd, address);
128 pgd_set(pgd, page);
129 return page + address;
131 if (pgd_bad(*pgd)) {
132 __bad_pmd(pgd);
133 return NULL;
135 return (pmd_t *)__pgd_page(*pgd) + address;
138 extern inline void pte_free_kernel(pte_t *pte)
140 free_pte_fast(pte);
143 extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
145 return pte_alloc(pmd, address);
148 extern inline void pmd_free_kernel(pmd_t *pmd)
150 free_pmd_fast(pmd);
153 extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
155 return pmd_alloc(pgd, address);
158 extern inline void pgd_free(pgd_t *pgd)
160 free_pmd_fast((pmd_t *)pgd);
163 extern inline pgd_t *pgd_alloc(void)
165 pgd_t *pgd = (pgd_t *)get_pmd_fast();
166 if (!pgd)
167 pgd = (pgd_t *)get_pointer_table();
168 return pgd;
171 extern int do_check_pgt_cache(int, int);
173 extern inline void set_pgdir(unsigned long address, pgd_t entry)
179 * flush all user-space atc entries.
181 static inline void __flush_tlb(void)
183 if (CPU_IS_040_OR_060)
184 __asm__ __volatile__(".chip 68040\n\t"
185 "pflushan\n\t"
186 ".chip 68k");
187 else
188 __asm__ __volatile__("pflush #0,#4");
/* Flush the single atc entry covering @addr (68040/68060 pflush form). */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
199 static inline void __flush_tlb_one(unsigned long addr)
201 if (CPU_IS_040_OR_060)
202 __flush_tlb040_one(addr);
203 else
204 __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
207 #define flush_tlb() __flush_tlb()
210 * flush all atc entries (both kernel and user-space entries).
212 static inline void flush_tlb_all(void)
214 if (CPU_IS_040_OR_060)
215 __asm__ __volatile__(".chip 68040\n\t"
216 "pflusha\n\t"
217 ".chip 68k");
218 else
219 __asm__ __volatile__("pflusha");
222 static inline void flush_tlb_mm(struct mm_struct *mm)
224 if (mm == current->mm)
225 __flush_tlb();
228 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
230 if (vma->vm_mm == current->mm)
231 __flush_tlb_one(addr);
234 static inline void flush_tlb_range(struct mm_struct *mm,
235 unsigned long start, unsigned long end)
237 if (mm == current->mm)
238 __flush_tlb();
241 extern inline void flush_tlb_kernel_page(unsigned long addr)
243 if (CPU_IS_040_OR_060) {
244 mm_segment_t old_fs = get_fs();
245 set_fs(KERNEL_DS);
246 __asm__ __volatile__(".chip 68040\n\t"
247 "pflush (%0)\n\t"
248 ".chip 68k"
249 : : "a" (addr));
250 set_fs(old_fs);
251 } else
252 __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
/* No-op on m68k. NOTE(review): body not visible in this extract;
   reconstructed as empty — confirm against the original file. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
260 #endif /* _MOTOROLA_PGALLOC_H */