/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
10 #define _ASM_PGALLOC_H
12 #include <linux/config.h>
/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(mm, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
/*
 * Indirect TLB-flush hooks; set at boot to the routine matching the
 * CPU variant.  The _flush_tlb_range declaration was truncated in this
 * copy: it takes (mm, start, end), matching the flush_tlb_range macro
 * below and the comment above.
 */
extern void (*_flush_tlb_all)(void);
extern void (*_flush_tlb_mm)(struct mm_struct *mm);
extern void (*_flush_tlb_range)(struct mm_struct *mm, unsigned long start,
	unsigned long end);
extern void (*_flush_tlb_page)(struct vm_area_struct *vma, unsigned long page);
/* Uniprocessor (the #else below is CONFIG_SMP): call the hooks directly. */
#define flush_tlb_all()			_flush_tlb_all()
#define flush_tlb_mm(mm)		_flush_tlb_mm(mm)
#define flush_tlb_range(mm,vmaddr,end)	_flush_tlb_range(mm, vmaddr, end)
#define flush_tlb_page(vma,page)	_flush_tlb_page(vma, page)
35 #else /* CONFIG_SMP */
37 extern void flush_tlb_all(void);
38 extern void flush_tlb_mm(struct mm_struct
*);
39 extern void flush_tlb_range(struct mm_struct
*, unsigned long, unsigned long);
40 extern void flush_tlb_page(struct vm_area_struct
*, unsigned long);
42 #endif /* CONFIG_SMP */
/*
 * Flush TLB entries covering page tables in [start, end) — a no-op
 * here, per the original body comment.
 */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS. */
}
/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
/* Per-CPU quicklists of recently freed page-table pages, plus a count
   of how many pages they currently hold. */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
62 extern pgd_t
*get_pgd_slow(void);
64 extern inline pgd_t
*get_pgd_fast(void)
68 if((ret
= pgd_quicklist
) != NULL
) {
69 pgd_quicklist
= (unsigned long *)(*ret
);
75 ret
= (unsigned long *) get_pgd_slow();
79 extern inline void free_pgd_fast(pgd_t
*pgd
)
81 *(unsigned long *)pgd
= (unsigned long) pgd_quicklist
;
82 pgd_quicklist
= (unsigned long *) pgd
;
86 extern inline void free_pgd_slow(pgd_t
*pgd
)
88 free_pages((unsigned long)pgd
, 1);
91 extern pte_t
*get_pte_slow(pmd_t
*pmd
, unsigned long address_preadjusted
);
92 extern pte_t
*get_pte_kernel_slow(pmd_t
*pmd
, unsigned long address_preadjusted
);
94 extern inline pte_t
*get_pte_fast(void)
98 if((ret
= (unsigned long *)pte_quicklist
) != NULL
) {
99 pte_quicklist
= (unsigned long *)(*ret
);
101 pgtable_cache_size
--;
106 extern inline void free_pte_fast(pte_t
*pte
)
108 *(unsigned long *)pte
= (unsigned long) pte_quicklist
;
109 pte_quicklist
= (unsigned long *) pte
;
110 pgtable_cache_size
++;
113 extern inline void free_pte_slow(pte_t
*pte
)
115 free_pages((unsigned long)pte
, 0);
118 extern pmd_t
*get_pmd_slow(pgd_t
*pgd
, unsigned long address_preadjusted
);
119 extern pmd_t
*get_pmd_kernel_slow(pgd_t
*pgd
, unsigned long address_preadjusted
);
121 extern inline pmd_t
*get_pmd_fast(void)
125 if ((ret
= (unsigned long *)pmd_quicklist
) != NULL
) {
126 pmd_quicklist
= (unsigned long *)(*ret
);
128 pgtable_cache_size
--;
135 extern inline void free_pmd_fast(pmd_t
*pmd
)
137 *(unsigned long *)pmd
= (unsigned long) pmd_quicklist
;
138 pmd_quicklist
= (unsigned long *) pmd
;
139 pgtable_cache_size
++;
142 extern inline void free_pmd_slow(pmd_t
*pmd
)
144 free_pages((unsigned long)pmd
, 1);
147 extern void __bad_pte(pmd_t
*pmd
);
148 extern void __bad_pte_kernel(pmd_t
*pmd
);
149 extern void __bad_pmd(pgd_t
*pgd
);
/* The quicklist (fast) variants are the default alloc/free primitives. */
#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free(pte)		free_pmd_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()
156 extern inline pte_t
* pte_alloc(pmd_t
* pmd
, unsigned long address
)
158 address
= (address
>> PAGE_SHIFT
) & (PTRS_PER_PTE
- 1);
160 if (pmd_none(*pmd
)) {
161 pte_t
*page
= get_pte_fast();
163 pmd_val(*pmd
) = (unsigned long) page
;
164 return page
+ address
;
166 return get_pte_slow(pmd
, address
);
172 return (pte_t
*) pmd_page(*pmd
) + address
;
175 extern inline pmd_t
*pmd_alloc(pgd_t
* pgd
, unsigned long address
)
177 address
= (address
>> PMD_SHIFT
) & (PTRS_PER_PMD
- 1);
178 if (pgd_none(*pgd
)) {
179 pmd_t
*page
= get_pmd_fast();
182 return get_pmd_slow(pgd
, address
);
184 return page
+ address
;
190 return (pmd_t
*) pgd_page(*pgd
) + address
;
193 extern pte_t kptbl
[(PAGE_SIZE
<<KPTBL_PAGE_ORDER
)/sizeof(pte_t
)];
194 extern pmd_t kpmdtbl
[PTRS_PER_PMD
];
196 #define pmd_alloc_kernel(d,a) (pmd_t *)kpmdtbl
198 extern inline pte_t
* pte_alloc_kernel(pmd_t
* pmd
, unsigned long address
)
200 return (kptbl
+ (address
>> PAGE_SHIFT
));
203 extern int do_check_pgt_cache(int, int);
205 #endif /* _ASM_PGALLOC_H */