[davej-history.git] / include/asm-arm/pgalloc.h
/*
 *  linux/include/asm-arm/pgalloc.h
 *
 *  Copyright (C) 2000 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
#include <linux/config.h>

#include <asm/processor.h>

/*
 * Get the cache handling stuff now.
 */
#include <asm/proc/cache.h>

/*
 * ARM processors do not cache TLB tables in RAM.
 */
#define flush_tlb_pgtables(mm,start,end)	do { } while (0)
/*
 * Page table cache stuff
 */
#ifndef CONFIG_NO_PGT_CACHE

#ifdef CONFIG_SMP
#error Pgtable caches have to be per-CPU, so that no locking is needed.
#endif	/* CONFIG_SMP */

extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

/* used for quicklists */
#define __pgd_next(pgd)	(((unsigned long *)pgd)[1])
#define __pte_next(pte)	(((unsigned long *)pte)[0])
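/*
 * The quicklists are intrusive free lists: a free page table page
 * stores the link to the next free page inside itself, in the word
 * named by __pgd_next()/__pte_next() above.  A minimal sketch of the
 * pop operation on such a list, with a hypothetical head chained
 * through word 0:
 *
 *	unsigned long *quicklist_pop(unsigned long **head)
 *	{
 *		unsigned long *page = *head;
 *		if (page)
 *			*head = (unsigned long *)page[0];
 *		return page;
 *	}
 */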
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		/* unlink the first free pgd from the quicklist */
		pgd_quicklist = (unsigned long *)__pgd_next(ret);
		/* entry 1 held the quicklist link; make it match entry 2 again */
		ret[1] = ret[2];
		/* push the repaired entry out to RAM, where the MMU reads tables */
		clean_dcache_entry(ret + 1);
		pgtable_cache_size--;
	}
	return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
/* We don't use pmd cache, so this is a dummy routine */
#define get_pmd_fast()	((pmd_t *)0)

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = pte_quicklist) != NULL) {
		/* same game as get_pgd_fast(), but the link lives in word 0 */
		pte_quicklist = (unsigned long *)__pte_next(ret);
		ret[0] = ret[1];
		clean_dcache_entry(ret);
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
	__pte_next(pte) = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}
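/*
 * A concrete picture of the list encoding (hypothetical pages p1 and
 * p2; a sketch, not code from this tree).  After:
 *
 *	free_pte_fast(p1);
 *	free_pte_fast(p2);
 *
 * the cache state is:
 *
 *	pte_quicklist == (unsigned long *)p2
 *	p2[0]         == (unsigned long)p1
 *	p1[0]         == 0	(if the list started out empty)
 *
 * pgtable_cache_size has grown by two, and two get_pte_fast() calls
 * return p2 and then p1.
 */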
#else	/* CONFIG_NO_PGT_CACHE */

#define pgd_quicklist		((unsigned long *)0)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		((unsigned long *)0)

#define get_pgd_fast()		((pgd_t *)0)
#define get_pmd_fast()		((pmd_t *)0)
#define get_pte_fast()		((pte_t *)0)

#define free_pgd_fast(pgd)	free_pgd_slow(pgd)
#define free_pmd_fast(pmd)	free_pmd_slow(pmd)
#define free_pte_fast(pte)	free_pte_slow(pte)

#endif	/* CONFIG_NO_PGT_CACHE */
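/*
 * With the page table cache configured out, every get_*_fast() above
 * is a constant NULL, so callers fall straight through to their
 * *_slow() paths, and the free_*_fast() calls go directly to the
 * slow free routines; callers need no #ifdefs of their own.
 */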
extern pgd_t *get_pgd_slow(void);
extern void free_pgd_slow(pgd_t *pgd);

#define free_pmd_slow(pmd)	do { } while (0)

extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long addr_preadjusted);
extern void free_pte_slow(pte_t *pte);
/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#ifndef pte_alloc_kernel
extern __inline__ pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	/* reduce the address to an index into the pte table */
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		/* quicklist empty: fall back to the slow allocator */
		if (!page)
			return get_pte_kernel_slow(pmd, address);
		set_pmd(pmd, mk_kernel_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd_kernel(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
#endif
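/*
 * Worked example of the index computation above, assuming the
 * classic 32-bit ARM values PAGE_SHIFT = 12 and PTRS_PER_PTE = 256
 * (other ARM configurations differ):
 *
 *	address                     = 0xc0123456
 *	(address >> 12) & (256 - 1) = 0xc0123 & 0xff = 0x23
 *
 * so the pte for 0xc0123456 is entry 0x23 of its pte table, and the
 * function returns pmd_page(*pmd) + 0x23.
 */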
extern __inline__ pte_t *pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		set_pmd(pmd, mk_user_pmd(page));
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
#define pmd_free_kernel		pmd_free
#define pmd_free(pmd)		do { } while (0)

#define pmd_alloc_kernel	pmd_alloc

/*
 * ARM hardware page tables are two-level, so the pmd level is folded
 * away: the "pmd" is just the pgd entry itself, and nothing is
 * allocated here.
 */
extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}
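/*
 * How a caller typically reaches these: the fault path walks
 * pgd -> pmd -> pte.  A minimal sketch, assuming mm and addr are a
 * valid mm_struct pointer and the faulting address:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_alloc(pgd, addr);
 *	pte_t *pte = pmd ? pte_alloc(pmd, addr) : NULL;
 *
 * On ARM pmd_alloc() cannot fail, but generic code checks anyway.
 */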
#define pgd_free(pgd)		free_pgd_fast(pgd)

extern __inline__ pgd_t *pgd_alloc(void)
{
	pgd_t *pgd;

	pgd = get_pgd_fast();
	if (!pgd)
		pgd = get_pgd_slow();

	return pgd;
}
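/*
 * pgd_alloc()/pgd_free() back the creation and teardown of a whole
 * address space.  A sketch of the usual caller, not exact core code:
 *
 *	mm->pgd = pgd_alloc();
 *	if (!mm->pgd)
 *		return -ENOMEM;
 */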
extern int do_check_pgt_cache(int, int);
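/*
 * do_check_pgt_cache() lives in the arch code and trims the
 * quicklists when they grow past a watermark.  A sketch of the usual
 * shape, assuming low/high are the trim targets (the real routine
 * may differ):
 *
 *	int freed = 0;
 *	if (pgtable_cache_size > high) {
 *		do {
 *			if (pgd_quicklist)
 *				free_pgd_slow(get_pgd_fast()), freed++;
 *			if (pte_quicklist)
 *				free_pte_slow(get_pte_fast()), freed++;
 *		} while (pgtable_cache_size > low);
 *	}
 *	return freed;
 */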
/*
 * set_pgdir() propagates a new kernel pgd entry into the page
 * directory of every task, and into any pgds still sitting on the
 * quicklist.
 */
extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm, address) = entry;
	}
	read_unlock(&tasklist_lock);

#ifndef CONFIG_NO_PGT_CACHE
	{
		pgd_t *pgd;
		for (pgd = (pgd_t *)pgd_quicklist; pgd;
		     pgd = (pgd_t *)__pgd_next(pgd))
			pgd[address >> PGDIR_SHIFT] = entry;
	}
#endif
}

#endif	/* _ASMARM_PGALLOC_H */