/*
 * linux/include/asm-arm/pgalloc.h
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <linux/config.h>

#include <asm/processor.h>

/*
 * Get the cache handling stuff now.
 */
#include <asm/proc/cache.h>
/*
 * ARM processors do not cache TLB tables in RAM.
 */
#define flush_tlb_pgtables(mm,start,end)	do { } while (0)
/*
 * Page table cache stuff
 */
#ifndef CONFIG_NO_PGT_CACHE

#ifdef CONFIG_SMP
#error Pgtable caches have to be per-CPU, so that no locking is needed.
#endif	/* CONFIG_SMP */

extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

/* used for quicklists */
#define __pgd_next(pgd)		(((unsigned long *)pgd)[1])
#define __pte_next(pte)		(((unsigned long *)pte)[0])
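/*
 * Editorial note (not part of the original header): each quicklist is a
 * singly-linked LIFO free list threaded through the free tables
 * themselves. A free pte table carries the link in its word 0, a free
 * pgd in its word 1, presumably because those words are dead user-space
 * entries while the table sits on the list. pgtable_cache_size counts
 * entries on both lists and is trimmed via do_check_pgt_cache(),
 * declared near the end of this file.
 */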
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)__pgd_next(ret);
		ret[1] = ret[2];
		clean_cache_area(ret + 1, 4);
		pgtable_cache_size--;
	}
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
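/*
 * Editorial note: get_pgd_fast() pops the list head. Popping leaves the
 * free-list link sitting in word 1 of the returned pgd, so that word is
 * repaired by copying word 2 (apparently both are expected to be empty
 * user-space entries on a cached pgd), and clean_cache_area() then
 * pushes the repaired word out to RAM, because the hardware table walk
 * on these cores reads memory directly rather than the data cache.
 */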
/* We don't use pmd cache, so this is a dummy routine */
#define get_pmd_fast()		((pmd_t *)0)

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)__pte_next(ret);
		ret[0] = ret[1];
		clean_cache_area(ret, 4);
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	__pte_next(pte) = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}
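/*
 * Editorial note: the pte fast paths mirror the pgd ones, with the link
 * kept in word 0. A hypothetical round trip through the cache:
 *
 *	free_pte_fast(pte);	table becomes the quicklist head
 *	pte = get_pte_fast();	the same table comes straight back,
 *				word 0 repaired and cleaned
 */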
#else	/* CONFIG_NO_PGT_CACHE */

#define pgd_quicklist		((unsigned long *)0)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		((unsigned long *)0)

#define get_pgd_fast()		((pgd_t *)0)
#define get_pmd_fast()		((pmd_t *)0)
#define get_pte_fast()		((pte_t *)0)

#define free_pgd_fast(pgd)	free_pgd_slow(pgd)
#define free_pmd_fast(pmd)	free_pmd_slow(pmd)
#define free_pte_fast(pte)	free_pte_slow(pte)

#endif	/* CONFIG_NO_PGT_CACHE */
extern pgd_t *get_pgd_slow(void);
extern void free_pgd_slow(pgd_t *pgd);

#define free_pmd_slow(pmd)	do { } while (0)

extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern void free_pte_slow(pte_t *pte);
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#ifndef pte_alloc_kernel
extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_kernel_slow(pmd, address);
		set_pmd(pmd, mk_kernel_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
#endif
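/*
 * Editorial note: pte_alloc_kernel() first folds "address" down to a
 * pte index within the table, then takes a table from the quicklist if
 * one is cached and installs it with kernel protections via
 * mk_kernel_pmd(). Only an empty cache reaches get_pte_kernel_slow(),
 * which (by its pairing with the fast path) is presumed to allocate
 * and initialise a fresh table before installing it in the pmd.
 */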
extern __inline__ pte_t *pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		set_pmd(pmd, mk_user_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
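/*
 * Usage sketch (hypothetical caller, mirroring the generic fault path
 * of this kernel generation; pgd_offset() comes from <asm/pgtable.h>):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(pgd, address);
 *	pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 *
 * On success, pte points at the entry covering "address".
 */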
#define pmd_free_kernel		pmd_free
#define pmd_free(pmd)		do { } while (0)

#define pmd_alloc_kernel	pmd_alloc
extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}
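/*
 * Editorial note: these page tables are two-level, so the pmd level is
 * folded into the pgd; pmd_alloc() simply reinterprets the pgd slot as
 * a pmd, and pmd_free() has nothing to release.
 */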
#define pgd_free(pgd)		free_pgd_fast(pgd)

extern __inline__ pgd_t *pgd_alloc(void)
{
	pgd_t *pgd;

	pgd = get_pgd_fast();
	if (!pgd)
		pgd = get_pgd_slow();

	return pgd;
}
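/*
 * Usage sketch (hypothetical, in the style of the mm setup code of
 * this era):
 *
 *	mm->pgd = pgd_alloc();
 *	if (!mm->pgd)
 *		goto fail;	(out of memory)
 */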
extern int do_check_pgt_cache(int, int);
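/*
 * Editorial note: set_pgdir() propagates a new kernel pgd entry into
 * every live mm (under tasklist_lock) and, when the page-table cache is
 * enabled, into every pgd parked on the quicklist, so that a recycled
 * pgd does not miss kernel mappings created while it was cached.
 */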
extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);

#ifndef CONFIG_NO_PGT_CACHE
	{
		pgd_t *pgd;
		for (pgd = (pgd_t *)pgd_quicklist; pgd;
		     pgd = (pgd_t *)__pgd_next(pgd))
			pgd[address >> PGDIR_SHIFT] = entry;
	}
#endif
}

#endif