/* $Id: pgalloc.h,v 1.3 2000/02/23 00:41:38 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(mm, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS.  */
}

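/*
 * Illustrative sketch (not part of this header): after the generic mm
 * code changes page table entries for a range of user addresses, it is
 * expected to call one of the primitives above so stale translations
 * are dropped from the TLB.  Roughly:
 *
 *	(clear or rewrite the PTEs covering [start, end))
 *	flush_tlb_range(mm, start, end);
 *
 * and a single-page update would use flush_tlb_page(vma, address)
 * instead.  The real callers live in the generic mm code, not here.
 */
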
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

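/*
 * Quicklist sketch (describing the code below): free pgd and pte pages
 * are kept on simple per-CPU singly linked lists.  The first word of
 * each free page holds the address of the next free page, so a pop or
 * push is just a pointer exchange:
 *
 *	pgd_quicklist --> page A --> page B --> NULL
 *
 * pgtable_cache_size counts the pages sitting on these lists;
 * do_check_pgt_cache() is the hook that trims them when the cache
 * grows too large.
 */
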
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* restore the slot used as the free-list link */
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* restore the slot used as the free-list link */
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pte_kernel(pmd_t *pmd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()

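/*
 * Usage sketch (assumed, mirroring how the generic mm code of this era
 * uses these macros): an address space gets its page directory from
 * pgd_alloc(), which pops a page off the quicklist when one is cached
 * and falls back to get_pgd_slow() otherwise; pgd_free() just pushes
 * the page back onto the quicklist instead of returning it to the page
 * allocator:
 *
 *	pgd_t *pgd = pgd_alloc();	(fast path: quicklist pop)
 *	...
 *	pgd_free(pgd);			(quicklist push, no free_page())
 *
 * The cached pages are only really released by do_check_pgt_cache().
 */
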
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();

		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		return get_pte_kernel_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();

		if (page) {
			pmd_val(*pmd) = (unsigned long)page;
			return page + address;
		}
		return get_pte_slow(pmd, address);
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

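/*
 * Walk sketch (assumed, not part of this header): the generic fault
 * path allocates missing levels top-down.  On MIPS the pmd level is
 * folded into the pgd, so pmd_alloc() below simply returns the pgd
 * slot and pte_alloc() does the real work:
 *
 *	pgd = pgd_offset(mm, address);
 *	pmd = pmd_alloc(pgd, address);
 *	pte = pte_alloc(pmd, address);
 *
 * pte_alloc() hands back a pointer to the pte slot for 'address', or
 * NULL after reporting a corrupt pmd through __bad_pte().
 */
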
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc

extern int do_check_pgt_cache(int, int);

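/*
 * set_pgdir() propagates a changed kernel pgd entry (for example when
 * the vmalloc area grows into a previously empty pgd slot) into every
 * task's page directory and into every pgd still cached on a
 * quicklist, so no process is left with a stale kernel mapping.
 */
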
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef CONFIG_SMP
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef CONFIG_SMP
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

#endif /* _ASM_PGALLOC_H */