/*
 *  linux/include/asm-arm/pgtable.h
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/config.h>
#include <asm/arch/memory.h>
#include <asm/proc-fns.h>
#include <asm/system.h>

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT       20
#define PGDIR_SHIFT     20

#define LIBRARY_TEXT_START      0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define FIRST_USER_PGD_NR       1
#define USER_PTRS_PER_PGD       ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
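
/*
 * Worked example (illustrative note, not part of the original header):
 * with PGDIR_SHIFT == 20, PGDIR_SIZE is 1UL << 20 = 1MB, so each
 * first-level entry maps 1MB of address space.  Assuming a 3GB
 * TASK_SIZE for illustration, that is 3072 entries, of which entry 0
 * is reserved (FIRST_USER_PGD_NR == 1), leaving 3071 for user space.
 */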

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
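
/*
 * Example (illustrative note, not part of the original header): the
 * index is the PROT_EXEC/PROT_WRITE/PROT_READ bit pattern of a mmap()
 * request, with __P* used for private and __S* for shared mappings.
 * So a private PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY
 * (the first write faults and triggers copy-on-write), the shared
 * equivalent gets __S011 == PAGE_SHARED, and PROT_EXEC alone falls
 * back to PAGE_READONLY because execute cannot be enforced separately
 * from read on this hardware.
 */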

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)        (empty_zero_page)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t *pmd);
extern void __handle_bad_pmd_kernel(pmd_t *pmd);

#define pte_none(pte)           (!pte_val(pte))
#define pte_clear(ptep)         set_pte((ptep), __pte(0))

#ifndef CONFIG_DISCONTIGMEM
#define pte_pagenr(pte)         ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
#else
/*
 * I'm not happy with this - we needlessly convert a physical address
 * to a virtual one, and then immediately back to a physical address,
 * which, if __va and __pa are expensive, causes twice the expense for
 * zero gain. --rmk
 */
#define pte_pagenr(pte)         MAP_NR(__va(pte_val(pte)))
#endif

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_clear(pmdp)         set_pmd(pmdp, __pmd(0))

/*
 * Permanent address of a page.  We never have highmem, so this is trivial.
 */
#define page_address(page)      ((page)->virtual)
#define pages_to_mb(x)          ((x) >> (20 - PAGE_SHIFT))
#define pte_page(x)             (mem_map + pte_pagenr(x))
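
/*
 * Round-trip sketch (illustrative, not part of the original header):
 * from a pte to its struct page and on to the kernel virtual address.
 * "pte" is assumed to be a present entry already in scope.
 */
#if 0  /* example only */
        struct page *pg = pte_page(pte);
        void *kva = (void *)page_address(pg);   /* fine: no highmem here */
#endif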

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = physpage | pgprot_val(pgprot);
        return pte;
}

#define mk_pte(page,pgprot) \
({ \
        pte_t __pte; \
        pte_val(__pte) = __pa(page_address(page)) + \
                         pgprot_val(pgprot); \
        __pte; \
})
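
/*
 * Usage sketch (illustrative, not part of the original header):
 * building a present pte for a freshly allocated page.  "ptep" is
 * assumed to come from pte_offset(); error handling omitted.
 */
#if 0  /* example only */
        struct page *page = alloc_page(GFP_KERNEL);
        set_pte(ptep, mk_pte(page, PAGE_SHARED));
#endif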

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)           (0)
#define pgd_bad(pgd)            (0)
#define pgd_present(pgd)        (1)
#define pgd_clear(pgdp)

#define page_pte_prot(page,prot)        mk_pte(page, prot)
#define page_pte(page)          mk_pte(page, __pgprot(0))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)
#define __pgd_offset(addr)      pgd_index(addr)

#define pgd_offset(mm, addr)    ((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)   ((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_offset(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, addr)   ((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
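
/*
 * Walk sketch (illustrative, not part of the original header): because
 * the pmd is folded into the pgd, pmd_offset() merely casts the pgd
 * entry, so a full walk costs two real lookups.  A real caller must
 * check pmd_none()/pmd_present() before dereferencing; omitted here.
 */
#if 0  /* example only */
static pte_t *walk_example(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* first-level entry */
        pmd_t *pmd = pmd_offset(pgd, addr);     /* folded: same entry */
        return pte_offset(pmd, addr);           /* entry in pte table */
}
#endif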

#include <asm/proc/pgtable.h>

extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}
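
/*
 * Usage sketch (illustrative, not part of the original header): the
 * mprotect()-style operation - keep the page frame and the bits in
 * _PAGE_CHG_MASK, substitute the new protection.  "ptep" is assumed.
 */
#if 0  /* example only */
        set_pte(ptep, pte_modify(*ptep, PAGE_READONLY));
#endif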

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define SWP_TYPE(x)             (((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x)           ((x).val >> 9)
#define SWP_ENTRY(type,offset)  ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define pte_to_swp_entry(pte)   ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(swp)   ((pte_t) { (swp).val })
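
/*
 * Worked layout (illustrative note, not part of the original header):
 * bits [1:0] of a swap pte stay zero (so the present bit is clear),
 * bits [8:2] hold the 7-bit type and bits [31:9] the offset; 23 offset
 * bits of 4K pages is 2^23 * 2^12 = 32GB, matching the comment above.
 */
#if 0  /* example only */
        swp_entry_t e = SWP_ENTRY(3, 0x1234);
        /* SWP_TYPE(e) == 3, SWP_OFFSET(e) == 0x1234 */
        set_pte(ptep, swp_entry_to_pte(e));     /* "ptep" is assumed */
#endif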

#define module_map      vmalloc
#define module_unmap    vfree

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)          (machine_is_riscpc() && test_bit(PG_skip, &(page)->flags))

#define io_remap_page_range     remap_page_range

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */