Import 2.3.43pre4
[davej-history.git] / include / asm-arm / pgtable.h
blob033541764a6b4e65a0b695b137b315cc7fac547b
/*
 * linux/include/asm-arm/pgtable.h
 */
4 #ifndef _ASMARM_PGTABLE_H
5 #define _ASMARM_PGTABLE_H
7 #include <asm/arch/memory.h>
8 #include <asm/proc-fns.h>
9 #include <asm/system.h>
/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 *
 * Both are 20 here: the pmd level is folded into the pgd (see the
 * pgd_xxx() comments further down), so each level covers the same 1MB.
 */
#define PMD_SHIFT	20
#define PGDIR_SHIFT	20

#define LIBRARY_TEXT_START	0x0c000000
#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/* Report a corrupt table entry, tagged with the source location that found it. */
#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */
/* Size and mask of the region one pmd/pgd entry maps. */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Number of first-level entries covering user space. */
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 *
 * __Pxxx: private mappings (write faults get a private copy),
 * __Sxxx: shared mappings; the three bits are read/write/exec.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
63 #ifndef __ASSEMBLY__
65 * ZERO_PAGE is a global shared page that is always zero: used
66 * for zero-mapped memory areas etc..
68 extern struct page *empty_zero_page;
69 #define ZERO_PAGE(vaddr) (empty_zero_page)
72 * Handling allocation failures during page table setup.
74 extern void __handle_bad_pmd(pmd_t *pmd);
75 extern void __handle_bad_pmd_kernel(pmd_t *pmd);
77 #define pte_none(pte) (!pte_val(pte))
78 #define pte_clear(ptep) set_pte((ptep), __pte(0))
79 #define pte_pagenr(pte) ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
81 #define pmd_none(pmd) (!pmd_val(pmd))
82 #define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
/*
 * Permanent address of a page.
 */
#define page_address(page)	({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
#define pte_page(x)		(mem_map + pte_pagenr(x))
92 * Conversion functions: convert a page and protection to a page entry,
93 * and a page entry and page directory to the page they refer to.
95 extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
97 pte_t pte;
98 pte_val(pte) = physpage | pgprot_val(pgprot);
99 return pte;
/* Build a pte for a mem_map page: translate the page's frame number back
 * to a physical address (RAM starts at PHYS_OFFSET) and add protection. */
#define mk_pte(page,pgprot)				\
({							\
	pte_t __pte;					\
	pte_val(__pte) = PHYS_OFFSET +			\
		(((page) - mem_map) << PAGE_SHIFT) +	\
		pgprot_val(pgprot);			\
	__pte;						\
})
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)			mk_pte(page, __pgprot(0))
/* to find an entry in a page-table-directory */
#define __pgd_offset(addr)	((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+__pgd_offset(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table..
 * (trivial: the pmd is folded into the pgd, so the "pmd" is the pgd entry) */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_offset(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, addr)	((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
139 #include <asm/proc/pgtable.h>
141 extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
143 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
144 return pte;
147 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
149 #define update_mmu_cache(vma,address,pte) do { } while (0)
/* Encode and decode a swap entry.
 *
 * Layout: swap type in bits 8..2 (7 bits), swap offset in bits 31..9
 * (23 bits); 2^23 pages * 4k = 32GB.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define SWP_TYPE(x)		(((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x)		((x).val >> 9)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(swp)	((pte_t) { (swp).val })
/* Module space is simply allocated with vmalloc/vfree. */
#define module_map		vmalloc
#define module_unmap		vfree

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(machine_is_riscpc() && test_bit(PG_skip, &(page)->flags))

#define io_remap_page_range	remap_page_range
169 #endif /* !__ASSEMBLY__ */
171 #endif /* _ASMARM_PGTABLE_H */