/* include/asm-i386/pgalloc.h */
#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
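
/*
 * Per-CPU quicklists: recently freed page directories and page
 * tables are cached in the cpuinfo_x86 of the current CPU so they
 * can be handed out again without going back to the page allocator.
 */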
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
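
/*
 * With PAE the page tables are three-level and the pmd helpers come
 * from pgalloc-3level.h; otherwise the pmd level is folded into the
 * pgd and the two-level helpers apply.
 */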
#if CONFIG_X86_PAE
# include <asm/pgalloc-3level.h>
#else
# include <asm/pgalloc-2level.h>
#endif

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

extern __inline__ pgd_t *get_pgd_slow(void)
{
        pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (ret) {
#if CONFIG_X86_PAE
                int i;
                for (i = 0; i < USER_PTRS_PER_PGD; i++)
                        __pgd_clear(ret + i);
#else
                memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
#endif
                memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return ret;
}
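
/*
 * The fast paths pop and push entries on the per-CPU quicklist.
 * A cached pgd is chained through its first word, which is cleared
 * again on allocation since slot 0 belongs to the user range.
 */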
extern __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        if ((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        } else
                ret = (unsigned long *)get_pgd_slow();
        return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
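
/*
 * Cached pte pages are linked through their first word as well.  On
 * allocation the link word is overwritten with the second word, which
 * is expected to still be zero from the page's earlier clearing, so
 * the caller receives a fully clear page table.
 */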
extern __inline__ pte_t *get_pte_fast(void)
{
        unsigned long *ret;

        if ((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

#define pte_free_kernel(pte)    free_pte_slow(pte)
#define pte_free(pte)           free_pte_slow(pte)
#define pgd_free(pgd)           free_pgd_slow(pgd)
#define pgd_alloc()             get_pgd_fast()
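
/*
 * Note the asymmetry: pgd_alloc() goes through the quicklist, while
 * the free macros here return pages straight to the page allocator;
 * the fast free variants above feed the quicklists, which
 * do_check_pgt_cache() (declared below) trims back when they grow.
 */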

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        if (!pmd)
                BUG();
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t * page = (pte_t *) get_pte_fast();

                if (!page)
                        return get_pte_kernel_slow(pmd, address);
                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(page)));
                return page + address;
        }
        if (pmd_bad(*pmd)) {
                __handle_bad_pmd_kernel(pmd);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

        if (pmd_none(*pmd))
                goto getnew;
        if (pmd_bad(*pmd))
                goto fix;
        return (pte_t *)pmd_page(*pmd) + address;
getnew:
{
        unsigned long page = (unsigned long) get_pte_fast();

        if (!page)
                return get_pte_slow(pmd, address);
        set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(page)));
        return (pte_t *)page + address;
}
fix:
        __handle_bad_pmd(pmd);
        return NULL;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 * (In the PAE case we free the page.)
 */
#define pmd_free(pmd)           free_pmd_slow(pmd)

#define pmd_free_kernel         pmd_free
#define pmd_alloc_kernel        pmd_alloc
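
/*
 * pmd_alloc() and free_pmd_slow() themselves come from the
 * 2level/3level header pulled in above, which either folds the pmd
 * into the pgd or, under PAE, manages a real pmd page.
 */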

extern int do_check_pgt_cache(int, int);
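
/*
 * set_pgdir() propagates a change to a kernel-range pgd slot into
 * every task's page directory and into all cached page directories,
 * so the new kernel mapping becomes visible in every address space.
 */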
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
        struct task_struct * p;
        pgd_t *pgd;
#ifdef CONFIG_SMP
        int i;
#endif

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (!p->mm)
                        continue;
                *pgd_offset(p->mm,address) = entry;
        }
        read_unlock(&tasklist_lock);
#ifndef CONFIG_SMP
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
#else
        /* For pgd_alloc/pgd_free, one holds the master kernel lock and so does
           our callee, so we can modify pgd caches of other CPUs as well. -jj */
        for (i = 0; i < NR_CPUS; i++)
                for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                        pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
        __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()     flush_tlb_current_task()

static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
        flush_tlb_mm(mm);
}
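
/*
 * Per-CPU lazy-TLB bookkeeping for SMP shootdown: TLBSTATE_OK means
 * the CPU is actively using its active_mm, TLBSTATE_LAZY that it is
 * in lazy TLB mode and its stale entries can be flushed later.
 */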
#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2
#define TLBSTATE_OLD    3

struct tlb_state
{
        struct mm_struct *active_mm;
        int state;
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];

#endif

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* i386 does not keep any page table caches in TLB */
}

#endif /* _I386_PGALLOC_H */