- Alan Cox: synch. PA-RISC arch and bitops cleanups
[davej-history.git] / include / asm-parisc / pgalloc.h
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>

/* Internal use D/I cache flushing routines... */
/* XXX: these functions must not access memory between f[di]ce instructions. */
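/* Note that in this revision both __flush_dcache_range() and
 * __flush_icache_range() simply punt to a full cache flush; the
 * line-at-a-time f[di]ce loops are kept below under "#if 0". */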

static inline void __flush_dcache_range(unsigned long start, unsigned long size)
{
#if 0
        register unsigned long count = (size / L1_CACHE_BYTES);
        register unsigned long loop = cache_info.dc_loop;
        register unsigned long i, j;

        if (size > 64 * 1024) {
                /* Just punt and clear the whole damn thing */
                flush_data_cache();
                return;
        }

        for (i = 0; i <= count; i++, start += L1_CACHE_BYTES)
                for (j = 0; j < loop; j++)
                        fdce(start);
#else
        flush_data_cache();
#endif
}

static inline void __flush_icache_range(unsigned long start, unsigned long size)
{
#if 0
        register unsigned long count = (size / L1_CACHE_BYTES);
        register unsigned long loop = cache_info.ic_loop;
        register unsigned long i, j;

        if (size > 64 * 1024) {
                /* Just punt and clear the whole damn thing */
                flush_instruction_cache();
                return;
        }

        for (i = 0; i <= count; i++, start += L1_CACHE_BYTES)
                for (j = 0; j < loop; j++)
                        fice(start);
#else
        flush_instruction_cache();
#endif
}

static inline void
flush_kernel_dcache_range(unsigned long start, unsigned long size)
{
        register unsigned long end = start + size;
        register unsigned long i;

        start &= ~(L1_CACHE_BYTES - 1);
        for (i = start; i < end; i += L1_CACHE_BYTES) {
                kernel_fdc(i);
        }
        asm volatile("sync" : : );
        asm volatile("syncdma" : : );
}
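
/* Usage sketch (hypothetical names, not from this file): flush a kernel
 * buffer back to memory before handing it to a DMA engine, where "buf"
 * and "len" describe a kernel-virtual region:
 *
 *      flush_kernel_dcache_range((unsigned long) buf, len);
 */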

extern void __flush_page_to_ram(unsigned long address);

#define flush_cache_all()               flush_all_caches()
#define flush_cache_mm(foo)             flush_all_caches()

#if 0
/* This is how I think the cache flushing should be done -- mrw */
extern inline void flush_cache_mm(struct mm_struct *mm) {
        if (mm == current->mm) {
                flush_user_dcache_range(mm->start_data, mm->end_data);
                flush_user_icache_range(mm->start_code, mm->end_code);
        } else {
                flush_other_dcache_range(mm->context, mm->start_data, mm->end_data);
                flush_other_icache_range(mm->context, mm->start_code, mm->end_code);
        }
}
#endif

#define flush_cache_range(mm, start, end) do { \
        __flush_dcache_range(start, (unsigned long)end - (unsigned long)start); \
        __flush_icache_range(start, (unsigned long)end - (unsigned long)start); \
} while(0)

#define flush_cache_page(vma, vmaddr) do { \
        __flush_dcache_range(vmaddr, PAGE_SIZE); \
        __flush_icache_range(vmaddr, PAGE_SIZE); \
} while(0)

#define flush_page_to_ram(page) \
        __flush_page_to_ram((unsigned long)page_address(page))

#define flush_icache_range(start, end) \
        __flush_icache_range(start, end - start)

#define flush_icache_page(vma, page) \
        __flush_icache_range(page_address(page), PAGE_SIZE)

#define flush_dcache_page(page) \
        __flush_dcache_range(page_address(page), PAGE_SIZE)
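
/* Because the line-at-a-time loops in __flush_dcache_range() and
 * __flush_icache_range() are compiled out above, every one of these
 * per-range/per-page macros currently degrades to a full data or
 * instruction cache flush. */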

/* TLB flushing routines.... */

extern void flush_data_tlb(void);
extern void flush_instruction_tlb(void);

#define flush_tlb() do { \
        flush_data_tlb(); \
        flush_instruction_tlb(); \
} while(0)

#define flush_tlb_all()         flush_tlb()     /* XXX p[id]tlb */

extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}

static inline void flush_instruction_tlb_range(unsigned long start,
                                        unsigned long size)
{
#if 0
        register unsigned long count = (size / PAGE_SIZE);
        register unsigned long loop = cache_info.it_loop;
        register unsigned long i, j;

        for (i = 0; i <= count; i++, start += PAGE_SIZE)
                for (j = 0; j < loop; j++)
                        pitlbe(start);
#else
        flush_instruction_tlb();
#endif
}

static inline void flush_data_tlb_range(unsigned long start,
                                        unsigned long size)
{
#if 0
        register unsigned long count = (size / PAGE_SIZE);
        register unsigned long loop = cache_info.dt_loop;
        register unsigned long i, j;

        for (i = 0; i <= count; i++, start += PAGE_SIZE)
                for (j = 0; j < loop; j++)
                        pdtlbe(start);
#else
        flush_data_tlb();
#endif
}
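
/* The range-flush helpers above appear to expect the target space id in
 * space register sr1, so __flush_tlb_range() below saves sr1, installs
 * the given space, purges the D and I TLBs for the range, and then
 * restores the old sr1 value. */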

static inline void __flush_tlb_range(unsigned long space, unsigned long start,
                                unsigned long size)
{
        unsigned long old_sr1;

        if (!size)
                return;

        old_sr1 = mfsp(1);
        mtsp(space, 1);

        flush_data_tlb_range(start, size);
        flush_instruction_tlb_range(start, size);

        mtsp(old_sr1, 1);
}

extern void __flush_tlb_space(unsigned long space);

static inline void flush_tlb_mm(struct mm_struct *mm)
{
#if 0
        __flush_tlb_space(mm->context);
#else
        flush_tlb();
#endif
}
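
/* A per-address-space flush via __flush_tlb_space() exists but is
 * compiled out here; flush_tlb_mm() currently purges the entire TLB
 * rather than just the entries tagged with mm->context. */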

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        __flush_tlb_range(vma->vm_mm->context, addr, PAGE_SIZE);
}

static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        __flush_tlb_range(mm->context, start, end - start);
}

/*
 * NOTE: Many of the below macros use PT_NLEVELS because
 * it is convenient that PT_NLEVELS == LOG2(pte size in bytes),
 * i.e. we use 3 level page tables when we use 8 byte pte's
 * (for 64 bit) and 2 level page tables when we use 4 byte pte's.
 */

#ifdef __LP64__
#define PT_NLEVELS      3
#define PT_INITIAL      4       /* Number of initial page tables */
#else
#define PT_NLEVELS      2
#define PT_INITIAL      2       /* Number of initial page tables */
#endif

/* Definitions for 1st level */

#define PGDIR_SHIFT     (PAGE_SHIFT + (PT_NLEVELS - 1)*(PAGE_SHIFT - PT_NLEVELS))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << (PAGE_SHIFT - PT_NLEVELS))
#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)

/* Definitions for 2nd level */

#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT - PT_NLEVELS))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT - PT_NLEVELS))
#else
#define PTRS_PER_PMD    1
#endif

/* Definitions for 3rd level */

#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT - PT_NLEVELS))
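
/* Worked example (assuming 4 kB pages, i.e. PAGE_SHIFT == 12, which is
 * an assumption and not guaranteed by this header):
 *
 *   PT_NLEVELS == 2 (32 bit, 4 byte pte's):
 *     PGDIR_SHIFT = 12 + 1*(12 - 2) = 22  ->  each pgd entry maps 4 MB
 *     PTRS_PER_PGD = PTRS_PER_PTE = 1 << 10 = 1024
 *
 *   PT_NLEVELS == 3 (64 bit, 8 byte pte's):
 *     PMD_SHIFT   = 12 + (12 - 3)   = 21  ->  each pmd entry maps 2 MB
 *     PGDIR_SHIFT = 12 + 2*(12 - 3) = 30  ->  each pgd entry maps 1 GB
 *     PTRS_PER_PGD = PTRS_PER_PMD = PTRS_PER_PTE = 1 << 9 = 512
 *
 * In both cases PAGE_SHIFT - PT_NLEVELS is log2(pte's per page), which is
 * why PT_NLEVELS == LOG2(pte size in bytes) is convenient. */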

#define get_pgd_fast    get_pgd_slow
#define free_pgd_fast   free_pgd_slow
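
/* There are no page-table quicklists in this port yet: the *_fast
 * allocators either alias their *_slow counterparts (pgd, and the free
 * paths below) or simply return NULL (get_pmd_fast()/get_pte_fast()),
 * so every allocation falls through to the slow path. */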

extern __inline__ pgd_t *get_pgd_slow(void)
{
        extern unsigned long gateway_pgd_offset;
        extern unsigned long gateway_pgd_entry;
        pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (ret) {
                memset(ret, 0, PTRS_PER_PGD * sizeof(pgd_t));

                /* Install HP-UX and Linux gateway page translations */

                pgd_val(*(ret + gateway_pgd_offset)) = gateway_pgd_entry;
        }
        return ret;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#if PT_NLEVELS == 3

/* Three Level Page Table Support for pmd's */

extern __inline__ pmd_t *get_pmd_fast(void)
{
        return NULL; /* la la */
}

#if 0
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}
#else
#define free_pmd_fast   free_pmd_slow
#endif

extern __inline__ pmd_t *get_pmd_slow(void)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                clear_page(pmd);
        return pmd;
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

extern void __bad_pgd(pgd_t *pgd);
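
/* pmd_alloc(): return the pmd entry covering "address" inside *pgd.  If
 * the pgd slot is empty, a fresh pmd page is allocated and installed with
 * _PAGE_TABLE; the pgd_none() re-check before installing avoids
 * clobbering an entry that was set up in the meantime, in which case the
 * duplicate page is released again.  A corrupt pgd entry goes through
 * __bad_pgd(). */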

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);

        if (pgd_none(*pgd))
                goto getnew;
        if (pgd_bad(*pgd))
                goto fix;
        return (pmd_t *) pgd_page(*pgd) + address;
getnew:
{
        pmd_t *page = get_pmd_fast();

        if (!page)
                page = get_pmd_slow();
        if (page) {
                if (pgd_none(*pgd)) {
                        pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)page);
                        return page + address;
                }
                else
                        free_pmd_fast(page);
        }
        else {
                return NULL;
        }
}
fix:
        __bad_pgd(pgd);
        return NULL;
}

#else

/* Two Level Page Table Support for pmd's */

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void free_pmd_fast(pmd_t * pmd)
{
}

#endif

extern __inline__ pte_t *get_pte_fast(void)
{
        return NULL; /* la la */
}

#if 0
extern __inline__ void free_pte_fast(pte_t *pte)
{
}
#else
#define free_pte_fast   free_pte_slow
#endif

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

#define pmd_alloc_kernel        pmd_alloc
#define pte_alloc_kernel        pte_alloc

#define pte_free(pte)           free_pte_fast(pte)
#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc()             get_pgd_fast()

extern void __bad_pmd(pmd_t *pmd);
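
/* pte_alloc() mirrors pmd_alloc() one level down: it returns the pte slot
 * for "address" inside *pmd, allocating a new pte page and installing it
 * with _PAGE_TABLE when the pmd entry is empty.  get_pte_slow() is defined
 * out of line and is presumably responsible for installing the page it
 * allocates, since its result is returned directly. */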

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

        if (pmd_none(*pmd))
                goto getnew;
        if (pmd_bad(*pmd))
                goto fix;
        return (pte_t *) pmd_page(*pmd) + address;
getnew:
{
        pte_t *page = get_pte_fast();

        if (!page)
                return get_pte_slow(pmd, address);
        pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)page);
        return page + address;
}
fix:
        __bad_pmd(pmd);
        return NULL;
}
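
/* Rough usage sketch (not from this file): the generic 2.4-era fault path
 * builds a mapping with these helpers roughly as follows, where "mm" and
 * "address" come from the fault and pgd_offset() is provided by
 * <asm/pgtable.h>:
 *
 *      pgd_t *pgd = pgd_offset(mm, address);
 *      pmd_t *pmd = pmd_alloc(pgd, address);
 *      pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 */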

extern int do_check_pgt_cache(int, int);

#endif