/* include/asm-sh/pgtable.h */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

/* Copyright (C) 1999 Niibe Yutaka */

/*
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <linux/threads.h>

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);
#if defined(__sh3__)
/* Cache flushing:
 *
 * - flush_cache_all() flushes entire cache
 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
 * - flush_cache_page(mm, vmaddr) flushes a single page
 * - flush_cache_range(mm, start, end) flushes a range of pages
 *
 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
 * - flush_page_to_ram(page) write back kernel page to ram
 * - flush_icache_range(start, end) flushes(invalidates) a range for icache
 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
 *
 * Caches are indexed (effectively) by physical address on SH-3, so
 * we don't need them.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#elif defined(__SH4__)
/*
 * Caches are broken on SH-4, so we need them.
 */
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
extern void flush_page_to_ram(struct page *page);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
#endif
/*
 * Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#include <asm/pgtable-2level.h>

#define __beep() asm("")
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS	(PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS	(1024-BOOT_USER_PGD_PTRS)
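/*
 * Worked example (assuming PAGE_OFFSET = 0x80000000 and PGDIR_SHIFT = 22 as
 * provided by <asm/pgtable-2level.h>): each PGD entry then covers 4MB, so
 * USER_PGD_PTRS = 0x80000000 >> 22 = 512 and KERNEL_PGD_PTRS = 1024 - 512 =
 * 512, i.e. the lower half of a page directory maps user space and the upper
 * half maps the kernel.
 */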
#ifndef __ASSEMBLY__
#define VMALLOC_START	P3SEG
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	P4SEG
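/*
 * Illustration (assuming the usual SH segment layout from <asm/addrspace.h>,
 * with P3SEG = 0xc0000000 and P4SEG = 0xe0000000): vmalloc() mappings live in
 * the P3 segment, the cached kernel segment that is translated through the
 * TLB, so VMALLOC_START..VMALLOC_END spans that 512MB window.
 */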
			/* 0x001     WT-bit on SH-4, 0 on SH-3 */
#define _PAGE_HW_SHARED	0x002  /* SH-bit  : page is shared among processes */
#define _PAGE_DIRTY	0x004  /* D-bit   : page changed */
#define _PAGE_CACHABLE	0x008  /* C-bit   : cachable */
			/* 0x010     SZ0-bit : Size of page */
#define _PAGE_RW	0x020  /* PR0-bit : write access allowed */
#define _PAGE_USER	0x040  /* PR1-bit : user space access allowed */
			/* 0x080     SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT	0x100  /* V-bit   : page is valid */
#define _PAGE_PROTNONE	0x200  /* software: if not present */
#define _PAGE_ACCESSED	0x400  /* software: page referenced */
#define _PAGE_U0_SHARED	0x800  /* software: page is shared in user space */
/* Mask which drops the software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0x1ffff1ff
/* Hardware flags: SZ=1 (4k-byte) */
#define _PAGE_FLAGS_HARD		0x00000010
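/*
 * Note (derived from the values above): _PAGE_PROTNONE, _PAGE_ACCESSED and
 * _PAGE_U0_SHARED (0x200, 0x400, 0x800) are software-only bits, and
 * _PAGE_FLAGS_HARDWARE_MASK keeps bits 0-8 and 12-28 of a PTE while dropping
 * those three software bits (and the top three bits of the word), so only
 * architecturally defined bits are ever handed to the hardware.
 */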
#if defined(__sh3__)
#define _PAGE_SHARED	_PAGE_HW_SHARED
#elif defined(__SH4__)
#define _PAGE_SHARED	_PAGE_U0_SHARED
#endif

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
/*
 * Like the i386 and MIPS, SuperH can't do page protection for execute, and
 * considers that the same as a read. Also, write permissions imply
 * read permissions. This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
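/*
 * For reference (standard Linux convention, not spelled out in this file):
 * the __P/__S entries are indexed by the low vm_flags bits read in "xwr"
 * order, with __P* used for private (MAP_PRIVATE) and __S* for shared
 * (MAP_SHARED) mappings.  So __P011 (private, read+write) maps to PAGE_COPY,
 * making writes go through copy-on-write, while __S011 maps to PAGE_SHARED
 * and is directly writable.
 */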
/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t * pmd);
extern void __handle_bad_pmd_kernel(pmd_t * pmd);

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
#define pte_page(x)	(mem_map+(unsigned long)(((pte_val(x) - __MEMORY_START) >> PAGE_SHIFT)))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_shared(pte_t pte)	{ return pte_val(pte) & _PAGE_SHARED; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
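/*
 * Note that the pte_*() / pte_mk*() helpers above operate on a pte value
 * passed by value and return the modified value; they do not touch the page
 * table itself.  The caller is expected to store the result back into the
 * page table with set_pte().
 */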
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({	pte_t __pte;							\
									\
	set_pte(&__pte, __pte(((page)-mem_map) *			\
		(unsigned long long)PAGE_SIZE + pgprot_val(pgprot) +	\
		__MEMORY_START));					\
	__pte;								\
})
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({	pte_t __pte; set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); __pte; })
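/*
 * Typical use (illustrative only): a fault handler builds a PTE from a
 * struct page and a protection, e.g.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 * while mk_pte_phys() does the same starting from a physical address, as
 * used by the remapping code.
 */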
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define pmd_page(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
	__pte_offset(address))
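/*
 * Putting the lookup macros together -- an illustrative (not from this file)
 * software walk of the page tables for one user address, assuming
 * pmd_offset() as provided by <asm/pgtable-2level.h>:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *	struct page *page = pte_present(*pte) ? pte_page(*pte) : NULL;
 */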
extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);
/*
 * Encode and de-code a swap entry.
 *
 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
 * and _PAGE_PROTNONE bits.
 */
#define SWP_TYPE(x)		((x).val & 0xff)
#define SWP_OFFSET(x)		((x).val >> 10)
#define SWP_ENTRY(type, offset)	((swp_entry_t) { (type) | ((offset) << 10) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })
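/*
 * From the definitions above: a swap entry keeps the swap type in bits 0-7
 * and the swap offset in bits 10 and up, leaving bits 8 and 9 clear -- these
 * are exactly the positions of _PAGE_PRESENT (0x100) and _PAGE_PROTNONE
 * (0x200), so a swapped-out PTE can never look present or PROT_NONE.
 */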
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

#endif /* __ASM_SH_PGTABLE_H */