/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle et alii
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/config.h>
#include <asm/addrspace.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <asm/cachectl.h>
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
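/*
 * Editor's sketch (not part of the original header): the arch cache
 * code can use this flag to defer dcache writeback until a page is
 * actually mapped into user space.  page_mapped_to_user() and
 * __flush_dcache_page() below are invented placeholders, not real
 * kernel interfaces; the block is compiled out.
 */
#if 0
void flush_dcache_page(struct page *page)
{
	if (!page_mapped_to_user(page)) {	/* hypothetical predicate */
		SetPageDcacheDirty(page);	/* flush lazily, at map time */
		return;
	}
	__flush_dcache_page(page);		/* hypothetical real flush */
}
#endif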
/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to
 * page tables.  Each page table is a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pgde is initialized to point to
 * invalid_pmd_table, each pmde is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.  When memory is low,
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so
 * that the failure is recognized later on.  Linux does not seem to
 * handle these failures very well though.  The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 * Vmalloc handling: vmalloc uses swapper_pg_dir[0] (returned by
 * pgd_offset_k), which is initialized to point to kpmdtbl.  kpmdtbl is
 * the only single page pmd in the system.  kpmdtbl entries point into
 * kptbl[] array.  We reserve 1 << PGD_ORDER pages to hold the
 * vmalloc range translations, which the fault handler looks at.
 */
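/*
 * Editor's sketch (not from the original file): how the constants
 * defined below carve up a virtual address, assuming the default 4K
 * page size (PAGE_SHIFT == 12).  Stand-alone user-space code, compiled
 * out; the EX_-prefixed names are local copies for illustration.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PMD_SHIFT	(EX_PAGE_SHIFT + (EX_PAGE_SHIFT - 3))		/* 21 */
#define EX_PGDIR_SHIFT	(EX_PMD_SHIFT + (EX_PAGE_SHIFT + 1 - 3))	/* 31 */

int main(void)
{
	unsigned long addr = 0x120003a58UL;	/* arbitrary example address */

	printf("pgd %lu pmd %lu pte %lu offset %lu\n",
	       addr >> EX_PGDIR_SHIFT,		/* index into the 1024 pgd slots */
	       (addr >> EX_PMD_SHIFT) & 1023,	/* index into the 1024 pmd slots */
	       (addr >> EX_PAGE_SHIFT) & 511,	/* index into the 512 pte slots  */
	       addr & 4095);			/* byte offset within the page   */
	return 0;
}
#endif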
#endif /* !__ASSEMBLY__ */
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + 1 - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per page directory level.  This is a three-level layout: the
 * PGD and PMD tables each occupy two pages (order 1), the PTE tables a
 * single page (order 0).
 */
#define PTRS_PER_PGD	1024
#define PTRS_PER_PMD	1024
#define PTRS_PER_PTE	512
#define PGD_ORDER	1
#define PMD_ORDER	1
#define PTE_ORDER	0
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define VMALLOC_START		XKSEG
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END	\
	(VMALLOC_START + ((1 << PGD_ORDER) * PTRS_PER_PTE * PAGE_SIZE))
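/*
 * Editor's note: with 4K pages the window above works out to
 * (1 << PGD_ORDER) * PTRS_PER_PTE * PAGE_SIZE = 2 * 512 * 4096 = 4 MB,
 * i.e. exactly what the 1 << PGD_ORDER pages of kptbl[] translations
 * described at the top of this file can map.
 */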
#include <asm/pgtable-bits.h>

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
/*
 * MIPS can't do page protection for execute, and treats it the same as
 * read.  Also, write permission implies read permission.  This is the
 * closest we can get by reasonable means..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
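/*
 * Editor's sketch (not part of the header): generic mm code selects one
 * of the sixteen entries above via a protection_map[] array indexed by
 * the low three mmap() prot bits plus a "shared" bit.  A stand-alone
 * model of that lookup, compiled out; EX_-prefixed names are local:
 */
#if 0
#include <stdio.h>

#define EX_PROT_READ	0x1
#define EX_PROT_WRITE	0x2

static const char *ex_protection_map[16] = {
	/* private mappings: __P000 .. __P111 */
	"NONE", "READONLY", "COPY", "COPY",
	"READONLY", "READONLY", "COPY", "COPY",
	/* shared mappings:  __S000 .. __S111 */
	"NONE", "READONLY", "SHARED", "SHARED",
	"READONLY", "READONLY", "SHARED", "SHARED",
};

int main(void)
{
	int prot = EX_PROT_READ | EX_PROT_WRITE;
	int shared = 1;			/* MAP_SHARED mapping */

	/* A shared read-write mapping resolves to PAGE_SHARED. */
	printf("PAGE_%s\n", ex_protection_map[(shared << 3) | prot]);
	return 0;
}
#endif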
#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define page_pte(page)		page_pte_prot(page, __pgprot(0))
#define pmd_phys(pmd)		(pmd_val(pmd) - PAGE_OFFSET)
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_kernel(pmd)	pmd_val(pmd)

static inline unsigned long pgd_page(pgd_t pgd)
{
	return pgd_val(pgd);
}

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
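/*
 * Editor's note: ptep_buddy() is not defined in this file.  In the MIPS
 * tree it pairs the two ptes that share one TLB entry (each MIPS TLB
 * entry maps an even/odd page pair) by flipping the low pointer bit,
 * along these lines (sketch, not the authoritative definition):
 *
 *	#define ptep_buddy(ptep)
 *		((pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t)))
 *
 * Both halves must agree on _PAGE_GLOBAL, since the hardware G bit only
 * takes effect when it is set in both entries of the pair.
 */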
static inline void pte_clear(pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
}
/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
#define set_pgd(pgdptr, pgdval) do { *(pgdptr) = (pgdval); } while(0)

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

/*
 * Empty pgd entries point to the invalid_pmd_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long) invalid_pmd_table;
}

#define pgd_bad(pgd)		(pgd_val(pgd) & ~PAGE_MASK)

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long) invalid_pmd_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
}
#ifdef CONFIG_DISCONTIGMEM

#define pte_page(x)	(NODE_MEM_MAP(PHYSADDR_TO_NID(pte_val(x))) + \
	PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))

#else
#define pte_page(x)	(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pte_pfn(x)	((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
#define PTE_FILE_MAX_BITS	27

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
 * into this range:
 */
#define pte_to_pgoff(_pte) \
	((((_pte).pte >> 3) & 0x3f) + (((_pte).pte >> 11) << 6))

#define pgoff_to_pte(off) \
	((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 6) << 11) + _PAGE_FILE })

#else

/*
 * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
 * into this range:
 */
#define pte_to_pgoff(_pte) \
	((((_pte).pte >> 3) & 0xf) + (((_pte).pte >> 9) << 4))

#define pgoff_to_pte(off) \
	((pte_t) { (((off) & 0xf) << 3) + (((off) >> 4) << 9) + _PAGE_FILE })

#endif
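/*
 * Editor's sketch (compiled out): round-trip check for the 64-bit
 * encoding above; all 27 offset bits survive encode plus decode.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long off = (1UL << 27) - 1;	/* largest 27-bit offset */
	unsigned long pte = ((off & 0xf) << 3) | ((off >> 4) << 9);

	assert((((pte >> 3) & 0xf) + ((pte >> 9) << 4)) == off);
	return 0;
}
#endif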
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)	{ BUG(); return 0; }
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
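/*
 * Editor's note: _PAGE_SILENT_WRITE/_PAGE_SILENT_READ shadow the bits
 * the TLB actually honours, while _PAGE_MODIFIED/_PAGE_ACCESSED are
 * software-only state.  The helpers above only set a hardware bit once
 * both the permission and the software state agree, e.g. (sketch,
 * compiled out):
 */
#if 0
	pte = pte_wrprotect(pte);	/* clears WRITE and SILENT_WRITE     */
	pte = pte_mkdirty(pte);		/* MODIFIED only: still not writable */
	pte = pte_mkwrite(pte);		/* now WRITE + SILENT_WRITE both set */
#endif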
/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
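/*
 * Editor's sketch: a typical use is a driver mapping device registers
 * uncached into user space, along these lines (2.6-era
 * remap_page_range() signature assumed; illustration only):
 *
 *	remap_page_range(vma, vma->vm_start, phys_addr,
 *			 vma->vm_end - vma->vm_start,
 *			 pgprot_noncached(vma->vm_page_prot));
 */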
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define __pgd_offset(address)	pgd_index(address)
#define page_pte(page)		page_pte_prot(page, __pgprot(0))

/*
 * To find an entry in a kernel page-table-directory.  All kernel
 * (vmalloc) translations hang off swapper_pg_dir[0], as described at
 * the top of this file, hence the constant 0.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, 0)

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr)	((mm)->pgd + pgd_index(addr))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) +
	       ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
	((pte_t *) (pmd_page_kernel(*dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
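/*
 * Editor's sketch (compiled out): how the accessors above compose into
 * a full software walk, in the style the generic mm code uses them.
 */
#if 0
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* level 1: pgd slot */
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);		/* level 2: pmd slot */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);	/* level 3: pte slot */
}
#endif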
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
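/*
 * Editor's sketch (compiled out): the layout above keeps the low 32
 * bits of a swap pte zero, so it can never appear present or global.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long type = 0x12, offset = 0x345678;
	unsigned long val = (type << 32) | (offset << 40);

	assert(((val >> 32) & 0xff) == type);	/* __swp_type()      */
	assert((val >> 40) == offset);		/* __swp_offset()    */
	assert((val & 0xffffffffUL) == 0);	/* low 32 bits clear */
	return 0;
}
#endif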
#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

typedef pte_t *pte_addr_t;

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#define io_remap_page_range remap_page_range

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */