Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / include / asm-sparc / pgtable.h
blob020b68fbbb55523ab0347a31b58d4ae3d179e91f
1 /* $Id: pgtable.h,v 1.106 2000/11/08 04:49:24 davem Exp $ */
2 #ifndef _SPARC_PGTABLE_H
3 #define _SPARC_PGTABLE_H
5 /* asm-sparc/pgtable.h: Defines and functions used to work
6 * with Sparc page tables.
8 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
9 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
12 #include <linux/config.h>
13 #include <linux/spinlock.h>
14 #include <asm/asi.h>
15 #ifdef CONFIG_SUN4
16 #include <asm/pgtsun4.h>
17 #else
18 #include <asm/pgtsun4c.h>
19 #endif
20 #include <asm/pgtsrmmu.h>
21 #include <asm/vac-ops.h>
22 #include <asm/oplib.h>
23 #include <asm/sbus.h>
24 #include <asm/btfixup.h>
25 #include <asm/system.h>
27 #ifndef __ASSEMBLY__
29 extern void load_mmu(void);
31 BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)
33 #define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)
35 /* Routines for data transfer buffers. */
36 BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
37 BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
39 #define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
40 #define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
42 /* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
43 BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
44 BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
45 BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
46 BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
48 #define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
49 #define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
50 #define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
51 #define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
54 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
56 BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long va, __u32 addr, int len)
57 BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
58 BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
60 #define mmu_map_dma_area(va, ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
61 #define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
62 #define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
64 BTFIXUPDEF_SIMM13(pmd_shift)
65 BTFIXUPDEF_SETHI(pmd_size)
66 BTFIXUPDEF_SETHI(pmd_mask)
68 extern unsigned int pmd_align(unsigned int addr) __attribute__((const));
69 extern __inline__ unsigned int pmd_align(unsigned int addr)
71 return ((addr + ~BTFIXUP_SETHI(pmd_mask)) & BTFIXUP_SETHI(pmd_mask));
74 BTFIXUPDEF_SIMM13(pgdir_shift)
75 BTFIXUPDEF_SETHI(pgdir_size)
76 BTFIXUPDEF_SETHI(pgdir_mask)
78 extern unsigned int pgdir_align(unsigned int addr) __attribute__((const));
79 extern __inline__ unsigned int pgdir_align(unsigned int addr)
81 return ((addr + ~BTFIXUP_SETHI(pgdir_mask)) & BTFIXUP_SETHI(pgdir_mask));
84 BTFIXUPDEF_SIMM13(ptrs_per_pte)
85 BTFIXUPDEF_SIMM13(ptrs_per_pmd)
86 BTFIXUPDEF_SIMM13(ptrs_per_pgd)
87 BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
89 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
91 #define pte_ERROR(e) __builtin_trap()
92 #define pmd_ERROR(e) __builtin_trap()
93 #define pgd_ERROR(e) __builtin_trap()
95 BTFIXUPDEF_INT(page_none)
96 BTFIXUPDEF_INT(page_shared)
97 BTFIXUPDEF_INT(page_copy)
98 BTFIXUPDEF_INT(page_readonly)
99 BTFIXUPDEF_INT(page_kernel)
101 #define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
102 #define PMD_SIZE BTFIXUP_SETHI(pmd_size)
103 #define PMD_MASK BTFIXUP_SETHI(pmd_mask)
104 #define PMD_ALIGN(addr) pmd_align(addr)
105 #define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
106 #define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
107 #define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
108 #define PGDIR_ALIGN pgdir_align(addr)
109 #define PTRS_PER_PTE BTFIXUP_SIMM13(ptrs_per_pte)
110 #define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
111 #define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
112 #define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
113 #define FIRST_USER_PGD_NR 0
115 #define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
116 #define PAGE_SHARED __pgprot(BTFIXUP_INT(page_shared))
117 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
118 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
120 extern unsigned long page_kernel;
122 #ifdef MODULE
123 #define PAGE_KERNEL page_kernel
124 #else
125 #define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
126 #endif
128 /* Top-level page directory */
129 extern pgd_t swapper_pg_dir[1024];
131 /* Page table for 0-4MB for everybody, on the Sparc this
132 * holds the same as on the i386.
134 extern pte_t pg0[1024];
135 extern pte_t pg1[1024];
136 extern pte_t pg2[1024];
137 extern pte_t pg3[1024];
139 extern unsigned long ptr_in_current_pgd;
/* Here is a trick, since mmap.c need the initializer elements for
 * protection_map[] to be constant at compile time, I set the following
 * to all zeros. I set it to the real values after I link in the
 * appropriate MMU page table routines at boot time.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
164 extern int num_contexts;
166 /* First physical page can be anywhere, the following is needed so that
167 * va-->pa and vice versa conversions work properly without performance
168 * hit for all __pa()/__va() operations.
170 extern unsigned long phys_base;
173 * BAD_PAGETABLE is used when we need a bogus page-table, while
174 * BAD_PAGE is used for a bogus page.
176 * ZERO_PAGE is a global shared page that is always zero: used
177 * for zero-mapped memory areas etc..
179 extern pte_t * __bad_pagetable(void);
180 extern pte_t __bad_page(void);
181 extern unsigned long empty_zero_page;
183 #define BAD_PAGETABLE __bad_pagetable()
184 #define BAD_PAGE __bad_page()
185 #define ZERO_PAGE(vaddr) (mem_map + (((unsigned long)&empty_zero_page - PAGE_OFFSET + phys_base) >> PAGE_SHIFT))
187 /* number of bits that fit into a memory pointer */
188 #define BITS_PER_PTR (8*sizeof(unsigned long))
190 /* to align the pointer to a pointer address */
191 #define PTR_MASK (~(sizeof(void*)-1))
193 #define SIZEOF_PTR_LOG2 2
195 BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
196 BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
198 #define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
199 #define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)
201 BTFIXUPDEF_SETHI(none_mask)
202 BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
203 BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
205 extern __inline__ int pte_none(pte_t pte)
207 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
210 #define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
211 #define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte)
213 BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
214 BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
215 BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
217 extern __inline__ int pmd_none(pmd_t pmd)
219 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
222 #define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
223 #define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
224 #define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
226 BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
227 BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
228 BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
229 BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
231 #define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
232 #define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
233 #define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
234 #define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
237 * The following only work if pte_present() is true.
238 * Undefined behaviour if not..
240 BTFIXUPDEF_HALF(pte_writei)
241 BTFIXUPDEF_HALF(pte_dirtyi)
242 BTFIXUPDEF_HALF(pte_youngi)
244 extern int pte_write(pte_t pte) __attribute__((const));
245 extern __inline__ int pte_write(pte_t pte)
247 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
250 extern int pte_dirty(pte_t pte) __attribute__((const));
251 extern __inline__ int pte_dirty(pte_t pte)
253 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
256 extern int pte_young(pte_t pte) __attribute__((const));
257 extern __inline__ int pte_young(pte_t pte)
259 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
262 BTFIXUPDEF_HALF(pte_wrprotecti)
263 BTFIXUPDEF_HALF(pte_mkcleani)
264 BTFIXUPDEF_HALF(pte_mkoldi)
266 extern pte_t pte_wrprotect(pte_t pte) __attribute__((const));
267 extern __inline__ pte_t pte_wrprotect(pte_t pte)
269 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
272 extern pte_t pte_mkclean(pte_t pte) __attribute__((const));
273 extern __inline__ pte_t pte_mkclean(pte_t pte)
275 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
278 extern pte_t pte_mkold(pte_t pte) __attribute__((const));
279 extern __inline__ pte_t pte_mkold(pte_t pte)
281 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
284 BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
285 BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
286 BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
288 #define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
289 #define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
290 #define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
292 #define page_pte_prot(page, prot) mk_pte(page, prot)
293 #define page_pte(page) page_pte_prot(page, __pgprot(0))
295 /* Permanent address of a page. */
296 #define page_address(page) ((page)->virtual)
298 BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
299 #define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)
302 * Conversion functions: convert a page and protection to a page entry,
303 * and a page entry and page directory to the page they refer to.
305 BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
307 BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
308 BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
310 #define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
311 #define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
312 #define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
314 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
315 BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
317 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
318 #define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
320 BTFIXUPDEF_INT(pte_modify_mask)
322 extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const));
323 extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
325 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
326 pgprot_val(newprot));
329 #define pgd_index(address) ((address) >> PGDIR_SHIFT)
331 /* to find an entry in a page-table-directory */
332 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
334 /* to find an entry in a kernel page-table-directory */
335 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
337 BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
338 BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)
340 /* Find an entry in the second-level page table.. */
341 #define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
343 /* Find an entry in the third-level page table.. */
344 #define pte_offset(dir,addr) BTFIXUP_CALL(pte_offset)(dir,addr)
346 /* The permissions for pgprot_val to make a page mapped on the obio space */
347 extern unsigned int pg_iobits;
349 #define flush_icache_page(vma, pg) do { } while(0)
351 /* Certain architectures need to do special things when pte's
352 * within a page table are directly modified. Thus, the following
353 * hook is made available.
356 BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
358 #define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
360 BTFIXUPDEF_CALL(int, mmu_info, char *)
362 #define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
364 /* Fault handler stuff... */
365 #define FAULT_CODE_PROT 0x1
366 #define FAULT_CODE_WRITE 0x2
367 #define FAULT_CODE_USER 0x4
369 BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
371 #define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
373 extern int invalid_segment;
375 /* Encode and de-code a swap entry */
376 #define SWP_TYPE(x) (((x).val >> 2) & 0x7f)
377 #define SWP_OFFSET(x) (((x).val >> 9) & 0x3ffff)
378 #define SWP_ENTRY(type,offset) ((swp_entry_t) { (((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9) })
379 #define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
380 #define swp_entry_to_pte(x) ((pte_t) { (x).val })
/* Circular doubly-linked list node tracking which mm owns each hardware
 * MMU context number. */
struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;	/* Dynamically allocated */
extern struct ctx_list ctx_free;	/* Head of free list */
extern struct ctx_list ctx_used;	/* Head of used contexts list */

#define NO_CONTEXT     -1

/* Unlink entry from whatever list it is on; entry itself is left stale. */
extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

/* Insert entry at the tail of the circular list rooted at head. */
extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}

#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)
410 extern __inline__ unsigned long
411 __get_phys (unsigned long addr)
413 switch (sparc_cpu_model){
414 case sun4:
415 case sun4c:
416 return sun4c_get_pte (addr) << PAGE_SHIFT;
417 case sun4m:
418 case sun4d:
419 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
420 default:
421 return 0;
425 extern __inline__ int
426 __get_iospace (unsigned long addr)
428 switch (sparc_cpu_model){
429 case sun4:
430 case sun4c:
431 return -1; /* Don't check iospace on sun4c */
432 case sun4m:
433 case sun4d:
434 return (srmmu_get_pte (addr) >> 28);
435 default:
436 return -1;
440 extern unsigned long *sparc_valid_addr_bitmap;
442 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
443 #define kern_addr_valid(addr) \
444 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
446 extern int io_remap_page_range(unsigned long from, unsigned long to,
447 unsigned long size, pgprot_t prot, int space);
449 #include <asm-generic/pgtable.h>
451 #endif /* !(__ASSEMBLY__) */
453 /* We provide our own get_unmapped_area to cope with VA holes for userland */
454 #define HAVE_ARCH_UNMAPPED_AREA
456 #endif /* !(_SPARC_PGTABLE_H) */