[davej-history.git] / include/asm-mips64/pgalloc.h
blob 198b0bb3be932ab845a321c4acc9a8f901047943
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
extern void (*_flush_tlb_all)(void);
extern void (*_flush_tlb_mm)(struct mm_struct *mm);
extern void (*_flush_tlb_range)(struct mm_struct *mm, unsigned long start,
        unsigned long end);
extern void (*_flush_tlb_page)(struct vm_area_struct *vma, unsigned long page);

#ifndef CONFIG_SMP

#define flush_tlb_all()			_flush_tlb_all()
#define flush_tlb_mm(mm)		_flush_tlb_mm(mm)
#define flush_tlb_range(mm,vmaddr,end)	_flush_tlb_range(mm, vmaddr, end)
#define flush_tlb_page(vma,page)	_flush_tlb_page(vma, page)

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#endif /* CONFIG_SMP */
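
/*
 * Typical use, sketched (hypothetical caller, not from this file):
 * the generic mm code invalidates stale translations after tearing
 * down mappings, e.g.
 *
 *	zap_page_range(mm, start, end - start);
 *	flush_tlb_range(mm, start, end);
 *
 * and calls flush_tlb_page(vma, address) after changing a single pte.
 */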

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS: TLB refills are handled in software,
	   so the TLB never caches translations of the page tables
	   themselves.  */
}

/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

#define pgd_quicklist		(current_cpu_data.pgd_quick)
#define pmd_quicklist		(current_cpu_data.pmd_quick)
#define pte_quicklist		(current_cpu_data.pte_quick)
#define pgtable_cache_size	(current_cpu_data.pgtable_cache_sz)
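
/*
 * The quicklists are per-CPU stacks of page-table pages that were
 * freed earlier, kept so most allocations can skip the page
 * allocator.  The first word of each cached page holds the link to
 * the next one, and pgtable_cache_size counts the pages cached on
 * this CPU.
 */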

extern pgd_t *get_pgd_slow(void);

extern inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		/* Pop the head of the quicklist. */
		pgd_quicklist = (unsigned long *)(*ret);
		/* Entry 0 held the freelist link; restore it from
		   entry 1, which still has the clean initial value. */
		ret[0] = ret[1];
		pgtable_cache_size--;
		return (pgd_t *)ret;
	}

	ret = (unsigned long *) get_pgd_slow();
	return (pgd_t *)ret;
}

extern inline void free_pgd_fast(pgd_t *pgd)
{
	/* Push onto the quicklist, using the first word as the link. */
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern inline void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, 1);
}
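
/*
 * Note the order-1 free above: on this port a pgd spans two
 * contiguous pages, so get_pgd_slow() (defined elsewhere in the
 * tree) presumably allocates order 1 to match.
 */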

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	/* Unlike get_pgd_fast(), this returns NULL when the list is
	   empty; the caller falls back to get_pte_slow(). */
	return (pte_t *)ret;
}

extern inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern inline void free_pte_slow(pte_t *pte)
{
	free_pages((unsigned long)pte, 0);
}

extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_preadjusted);
extern pmd_t *get_pmd_kernel_slow(pgd_t *pgd, unsigned long address_preadjusted);

extern inline pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pmd_quicklist) != NULL) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
		return (pmd_t *)ret;
	}

	return (pmd_t *)ret;
}

extern inline void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
}

extern inline void free_pmd_slow(pmd_t *pmd)
{
	free_pages((unsigned long)pmd, 1);
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);
extern void __bad_pmd(pgd_t *pgd);

#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()
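
/*
 * Intended pairing, sketched (hypothetical caller, not from this
 * tree):
 *
 *	pgd_t *pgd = pgd_alloc();	... pops the quicklist, or
 *					    falls back to get_pgd_slow()
 *	pgd_free(pgd);			... pushes it back on the list
 */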

extern inline pte_t *pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	if (pmd_none(*pmd)) {
		/* No pte table here yet: grab one from the quicklist,
		   or fall back to the slow path. */
		pte_t *page = get_pte_fast();

		if (page) {
			pmd_val(*pmd) = (unsigned long) page;
			return page + address;
		}
		return get_pte_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_pmd_fast();

		if (!page)
			return get_pmd_slow(pgd, address);
		pgd_set(pgd, page);
		return page + address;
	}
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
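
/*
 * Walking down to a pte then looks like this (a sketch of the generic
 * 2.4 fault path, not code from this file):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(pgd, address);
 *	pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 *
 * Each level is populated on first use and the helpers return a
 * pointer to the entry covering `address'.
 */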

extern pte_t kptbl[(PAGE_SIZE << KPTBL_PAGE_ORDER) / sizeof(pte_t)];
extern pmd_t kpmdtbl[PTRS_PER_PMD];

#define pmd_alloc_kernel(d,a)	(pmd_t *)kpmdtbl

extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	return (kptbl + (address >> PAGE_SHIFT));
}
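
/*
 * Kernel mappings bypass the quicklists entirely: all kernel ptes
 * live in the single statically sized kptbl[], and pmd_alloc_kernel()
 * always yields the one shared kpmdtbl.  Since the index above is not
 * masked, `address' is apparently expected to be an offset within the
 * region kptbl maps.
 */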

extern int do_check_pgt_cache(int, int);
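
/*
 * do_check_pgt_cache(low, high) trims the quicklists: once
 * pgtable_cache_size climbs past `high' it is meant to release cached
 * tables until the count falls back to `low'.  As on other 2.4 ports,
 * it is presumably called from the idle loop.
 */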

#endif /* _ASM_PGALLOC_H */