/* $Id: pgtable.h,v 1.81 1999/06/27 00:38:28 davem Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm-sparc/pgtable.h:  Defines and functions used to work
 *                       with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/mm.h>
#include <linux/config.h>
#include <linux/spinlock.h>
#include <asm/asi.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
#include <asm/pgtsun4c.h>
#endif
#include <asm/pgtsrmmu.h>
#include <asm/vac-ops.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/btfixup.h>

extern void load_mmu(void);
extern int io_remap_page_range(unsigned long from, unsigned long to,
                               unsigned long size, pgprot_t prot, int space);

BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)

#define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)
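
/* A note on the BTFIXUP pattern used throughout this header: sparc32 has to
 * support several incompatible MMUs (sun4c and the srmmu variants) with one
 * kernel image, so BTFIXUPDEF_CALL() declares a call slot that gets patched
 * at boot time with the routine chosen for the running MMU, and the
 * BTFIXUP_CALL() wrapper macro below each declaration is what callers use.
 * Roughly, the MMU setup code registers its implementation with something
 * like the following (an illustrative sketch, not copied from this tree):
 *
 *      BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault,
 *                      BTFIXUPCALL_NORM);
 *
 * after which quick_kernel_fault(addr) dispatches to the srmmu version.
 * See <asm/btfixup.h> for the mechanism itself.
 */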

/* Allocate a block of RAM which is aligned to its size.
   This procedure can be used until the call to mem_init(). */
extern void *sparc_init_alloc(unsigned long *kbrk, unsigned long size);

/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)

#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)

/* Routines for getting a dvma scsi buffer. */
struct mmu_sglist {
        char *addr;
        char *__dont_touch;
        unsigned int len;
        __u32 dvma_addr;
};

BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct linux_sbus *sbus)
BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct mmu_sglist *, int, struct linux_sbus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct linux_sbus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct mmu_sglist *, int, struct linux_sbus *sbus)
BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long addr, int len)

#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)

#define mmu_map_dma_area(addr,len) BTFIXUP_CALL(mmu_map_dma_area)(addr,len)
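
/* A rough usage sketch for the DVMA helpers above (the flow is inferred
 * from the names, not copied from a driver): a SCSI driver passes a kernel
 * virtual address to mmu_get_scsi_one() and gets back the 32-bit DVMA bus
 * address to program into the device; for scatter-gather it fills a
 * struct mmu_sglist array with addr/len pairs and mmu_get_scsi_sgl() fills
 * in each dvma_addr.  The matching mmu_release_scsi_*() call returns the
 * IOMMU/DVMA resources once the transfer has completed.
 */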

BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
BTFIXUPDEF_SETHI(pmd_mask)

extern unsigned int pmd_align(unsigned int addr) __attribute__((const));
extern __inline__ unsigned int pmd_align(unsigned int addr)
{
        return ((addr + ~BTFIXUP_SETHI(pmd_mask)) & BTFIXUP_SETHI(pmd_mask));
}

BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)

extern unsigned int pgdir_align(unsigned int addr) __attribute__((const));
extern __inline__ unsigned int pgdir_align(unsigned int addr)
{
        return ((addr + ~BTFIXUP_SETHI(pgdir_mask)) & BTFIXUP_SETHI(pgdir_mask));
}
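
/* Both helpers round an address up to the next pmd/pgdir boundary by adding
 * (size - 1), i.e. ~mask, and masking.  For example, on srmmu, where
 * pmd_size is 256K (pmd_mask == 0xfffc0000), pmd_align(0x40001) == 0x80000,
 * while an already aligned 0x40000 comes back unchanged.
 */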

BTFIXUPDEF_SIMM13(ptrs_per_pte)
BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)

#define VMALLOC_VMADDR(x) ((unsigned long)(x))
/* This is the same across all platforms */
#define VMALLOC_START (0xfe300000)
#define VMALLOC_END ~0x0UL

BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_shared)
BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)

#define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
#define PMD_SIZE BTFIXUP_SETHI(pmd_size)
#define PMD_MASK BTFIXUP_SETHI(pmd_mask)
#define PMD_ALIGN(addr) pmd_align(addr)
#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
#define PGDIR_ALIGN(addr) pgdir_align(addr)
#define PTRS_PER_PTE BTFIXUP_SIMM13(ptrs_per_pte)
#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)

#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
#define PAGE_SHARED __pgprot(BTFIXUP_INT(page_shared))
#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))

BTFIXUPDEF_CALL(void, set_pgdir, unsigned long, pgd_t)

#define set_pgdir(address,entry) BTFIXUP_CALL(set_pgdir)(address,entry)

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

/* Page table for 0-4MB for everybody; on the Sparc this
 * holds the same as on the i386.
 */
extern pte_t pg0[1024];
extern pte_t pg1[1024];
extern pte_t pg2[1024];
extern pte_t pg3[1024];

extern unsigned long ptr_in_current_pgd;

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))

/* Here is a trick: since mmap.c needs the initializer elements for
 * protection_map[] to be constant at compile time, I set the following
 * to all zeros.  I set them to the real values after I link in the
 * appropriate MMU page table routines at boot time.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
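
/* The __P entries are used for private (copy-on-write) mappings and the
 * __S entries for shared ones; the three digits are the exec, write and
 * read permission bits, in that order, matching the indexing of
 * protection_map[] in mm/mmap.c.  They are given their real, MMU-specific
 * values (built from the page_* constants above) once the page table
 * routines have been selected at boot.
 */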

extern int num_contexts;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) ((unsigned long)(&(empty_zero_page)))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

#define SIZEOF_PTR_LOG2 2

BTFIXUPDEF_CALL_CONST(unsigned long, pte_page, pte_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)

#define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)
#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)

BTFIXUPDEF_CALL(void, sparc_update_rootmmu_dir, struct task_struct *, pgd_t *pgdir)

#define SET_PAGE_DIR(tsk,pgdir) BTFIXUP_CALL(sparc_update_rootmmu_dir)(tsk, pgdir)

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
        ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

BTFIXUPDEF_SETHI(none_mask)
BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)

extern __inline__ int pte_none(pte_t pte)
{
        return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
}

#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
#define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte)

BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)

extern __inline__ int pmd_none(pmd_t pmd)
{
        return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
}

#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)

BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)

#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
BTFIXUPDEF_HALF(pte_writei)
BTFIXUPDEF_HALF(pte_dirtyi)
BTFIXUPDEF_HALF(pte_youngi)

extern int pte_write(pte_t pte) __attribute__((const));
extern __inline__ int pte_write(pte_t pte)
{
        return pte_val(pte) & BTFIXUP_HALF(pte_writei);
}

extern int pte_dirty(pte_t pte) __attribute__((const));
extern __inline__ int pte_dirty(pte_t pte)
{
        return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
}

extern int pte_young(pte_t pte) __attribute__((const));
extern __inline__ int pte_young(pte_t pte)
{
        return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
}

BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)

extern pte_t pte_wrprotect(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
}

extern pte_t pte_mkclean(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
}

extern pte_t pte_mkold(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
}

BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)

#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
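
/* Typical use, along the lines of the generic mm code (illustrative only;
 * the real call sites live in mm/memory.c): a write fault that is allowed
 * to proceed makes the page writable and marks it accessed and dirty with
 *
 *      set_pte(ptep, pte_mkdirty(pte_mkyoung(pte_mkwrite(pte))));
 *
 * while fork() uses pte_wrprotect() to force copy-on-write behaviour.
 */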

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)

#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)

BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)

#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)

BTFIXUPDEF_INT(pte_modify_mask)

extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const));
extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
                     pgprot_val(newprot));
}
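
/* pte_modify() keeps the page frame number and whatever MMU-specific state
 * bits are selected by pte_modify_mask, and substitutes the protection bits
 * taken from newprot; this is what mprotect()-style changes use to rewrite
 * an existing mapping without changing which physical page it points at.
 */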

BTFIXUPDEF_CALL(pgd_t *, pgd_offset, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr) BTFIXUP_CALL(pgd_offset)(mm,addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) BTFIXUP_CALL(pte_offset)(dir,addr)

extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache;
        unsigned long pgtable_cache_sz;
        unsigned long pgd_cache_sz;
        spinlock_t pgd_spinlock;
        spinlock_t pte_spinlock;
} pgt_quicklists;
#define pgd_quicklist (pgt_quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgd_spinlock (pgt_quicklists.pgd_spinlock)
#define pte_spinlock (pgt_quicklists.pte_spinlock)
#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
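
/* The quicklists above are small caches of recently freed page table pages:
 * the allocation routines can pop a ready-made pgd or pte page off
 * pgd_cache/pte_cache instead of going back to the page allocator, and
 * do_check_pgt_cache(low, high) (typically run from the idle loop) trims
 * the caches back toward the low watermark once they grow past the high
 * one.  There is no pmd quicklist on sparc32; see the stubs below.
 */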

BTFIXUPDEF_CALL(pte_t *, get_pte_fast, void)
BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
BTFIXUPDEF_CALL(void, free_pte_slow, pte_t *)
BTFIXUPDEF_CALL(void, free_pgd_slow, pgd_t *)
BTFIXUPDEF_CALL(int, do_check_pgt_cache, int, int)

#define get_pte_fast() BTFIXUP_CALL(get_pte_fast)()
extern __inline__ pmd_t *get_pmd_fast(void)
{
        return (pmd_t *)0;
}
#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
#define free_pte_slow(pte) BTFIXUP_CALL(free_pte_slow)(pte)
extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}
#define free_pgd_slow(pgd) BTFIXUP_CALL(free_pgd_slow)(pgd)
#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
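
/* get_pmd_fast() and free_pmd_slow() are deliberately empty: sun4c folds
 * the pmd level into the pgd, and srmmu pmd tables are only a fraction of
 * a page, so sparc32 never keeps a pmd quicklist (pmd_quicklist above is a
 * constant NULL).
 */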

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
BTFIXUPDEF_CALL(void, pte_free_kernel, pte_t *)
BTFIXUPDEF_CALL(pte_t *, pte_alloc_kernel, pmd_t *, unsigned long)

#define pte_free_kernel(pte) BTFIXUP_CALL(pte_free_kernel)(pte)
#define pte_alloc_kernel(pmd,addr) BTFIXUP_CALL(pte_alloc_kernel)(pmd,addr)

BTFIXUPDEF_CALL(void, pmd_free_kernel, pmd_t *)
BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_kernel, pgd_t *, unsigned long)

#define pmd_free_kernel(pmd) BTFIXUP_CALL(pmd_free_kernel)(pmd)
#define pmd_alloc_kernel(pgd,addr) BTFIXUP_CALL(pmd_alloc_kernel)(pgd,addr)

BTFIXUPDEF_CALL(void, pte_free, pte_t *)
BTFIXUPDEF_CALL(pte_t *, pte_alloc, pmd_t *, unsigned long)

#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
#define pte_alloc(pmd,addr) BTFIXUP_CALL(pte_alloc)(pmd,addr)

BTFIXUPDEF_CALL(void, pmd_free, pmd_t *)
BTFIXUPDEF_CALL(pmd_t *, pmd_alloc, pgd_t *, unsigned long)

#define pmd_free(pmd) BTFIXUP_CALL(pmd_free)(pmd)
#define pmd_alloc(pgd,addr) BTFIXUP_CALL(pmd_alloc)(pgd,addr)

BTFIXUPDEF_CALL(void, pgd_free, pgd_t *)
BTFIXUPDEF_CALL(pgd_t *, pgd_alloc, void)

#define pgd_free(pgd) BTFIXUP_CALL(pgd_free)(pgd)
#define pgd_alloc() BTFIXUP_CALL(pgd_alloc)()

/* Fine grained cache/tlb flushing. */

#ifdef __SMP__
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)

#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(mm,start,end) BTFIXUP_CALL(local_flush_cache_range)(mm,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)

BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)

#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
#define local_flush_tlb_range(mm,start,end) BTFIXUP_CALL(local_flush_tlb_range)(mm,start,end)
#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)

BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)

#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)

extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct mm_struct *mm,
                                  unsigned long start,
                                  unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);

extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm,
                                unsigned long start,
                                unsigned long end);
extern void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif

BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)

#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(mm,start,end) BTFIXUP_CALL(flush_cache_range)(mm,start,end)
#define flush_cache_page(vma,addr) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)

BTFIXUPDEF_CALL(void, flush_tlb_all, void)
BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)

#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(mm,start,end) BTFIXUP_CALL(flush_tlb_range)(mm,start,end)
#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)

BTFIXUPDEF_CALL(void, flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)

#define flush_page_to_ram(addr) BTFIXUP_CALL(flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)

/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;

/* MMU context switching. */
BTFIXUPDEF_CALL(void, switch_to_context, struct task_struct *)

#define switch_to_context(tsk) BTFIXUP_CALL(switch_to_context)(tsk)

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)

#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)

BTFIXUPDEF_CALL(int, mmu_info, char *)

#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)

/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4

BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)

#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)

extern int invalid_segment;

#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry) (((entry) >> 9) & 0x3ffff)
#define SWP_ENTRY(type,offset) ((((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9))
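
/* Software swap entry layout implied by the macros above:
 *
 *   bits  1-0 : left clear, so a swapped-out entry never carries the bits
 *               the MMUs use to mark a valid mapping
 *   bits  8-2 : swap type   (7 bits  - which swap area the page went to)
 *   bits 26-9 : swap offset (18 bits - page index within that swap area)
 */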

struct ctx_list {
        struct ctx_list *next;
        struct ctx_list *prev;
        unsigned int ctx_number;
        struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

#define NO_CONTEXT -1

extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
        entry->next = head;
        (entry->prev = head->prev)->next = entry;
        head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
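
/* ctx_free and ctx_used are the heads of circular doubly-linked lists of
 * hardware MMU contexts.  add_to_ctx_list() links an entry in just before
 * the head (i.e. at the tail) and remove_from_ctx_list() unlinks it in
 * O(1); the context allocator moves entries between the free and used
 * lists as address spaces gain and lose contexts.
 */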

extern __inline__ unsigned long
__get_phys (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4:
        case sun4c:
                return sun4c_get_pte (addr) << PAGE_SHIFT;
        case sun4m:
        case sun4d:
                return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
        default:
                return 0;
        }
}

extern __inline__ int
__get_iospace (unsigned long addr)
{
        switch (sparc_cpu_model){
        case sun4:
        case sun4c:
                return -1; /* Don't check iospace on sun4c */
        case sun4m:
        case sun4d:
                return (srmmu_get_pte (addr) >> 28);
        default:
                return -1;
        }
}
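
/* A worked example of the srmmu arithmetic above: the srmmu PTE keeps the
 * physical page number in bits 31:8, i.e. pte[31:8] == paddr >> 12, so
 * (pte & 0xffffff00) is paddr >> 4 and the final << 4 recovers the physical
 * address (truncated to 32 bits; srmmu physical addresses can be 36 bits
 * wide).  __get_iospace() likewise returns the top nibble of that 36-bit
 * physical address, which identifies the I/O space the page lives in.
 */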

#define module_map vmalloc
#define module_unmap vfree
extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (test_bit(PG_skip, &(page)->flags))
#define kern_addr_valid(addr) (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

#endif /* !(_SPARC_PGTABLE_H) */